diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 1361b2e462..0000000000 --- a/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -/hack/tools diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 82331a2d15..30e551a122 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -29,8 +29,8 @@ Please provide the following details: - Platform: (`Kubernetes`, `OpenShift`, `Rancher`, `GKE`, `EKS`, `AKS` etc.) - Platform Version: (e.g. `1.20.3`, `4.7.0`) -- PGO Image Tag: (e.g. `ubi8-5.0.2-0`) -- Postgres Version (e.g. `13`) +- PGO Image Tag: (e.g. `ubi8-5.x.y-0`) +- Postgres Version (e.g. `15`) - Storage: (e.g. `hostpath`, `nfs`, or the name of your storage class) ## Steps to Reproduce diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 5e1399c3ed..4de2077c77 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -32,8 +32,8 @@ Please provide the following details: - Platform: (`Kubernetes`, `OpenShift`, `Rancher`, `GKE`, `EKS`, `AKS` etc.) - Platform Version: (e.g. `1.20.3`, `4.7.0`) -- PGO Image Tag: (e.g. `ubi8-5.0.2-0`) -- Postgres Version (e.g. `13`) +- PGO Image Tag: (e.g. `ubi8-5.x.y-0`) +- Postgres Version (e.g. `15`) - Storage: (e.g. `hostpath`, `nfs`, or the name of your storage class) - Number of Postgres clusters: (`XYZ`) diff --git a/.github/ISSUE_TEMPLATE/support---question-and-answer.md b/.github/ISSUE_TEMPLATE/support---question-and-answer.md index c9ab84f53c..271caa9029 100644 --- a/.github/ISSUE_TEMPLATE/support---question-and-answer.md +++ b/.github/ISSUE_TEMPLATE/support---question-and-answer.md @@ -10,3 +10,26 @@ If you have a feature request, please open up a [Feature Request](https://github You can find information about general PGO [support](https://access.crunchydata.com/documentation/postgres-operator/latest/support/) at: [https://access.crunchydata.com/documentation/postgres-operator/latest/support/](https://access.crunchydata.com/documentation/postgres-operator/latest/support/) + +## Questions + +For questions that are neither bugs nor feature requests, please be sure to + +- [ ] Provide information about your environment (see below for more information). +- [ ] Provide any steps or other relevant details related to your question. +- [ ] Attach logs, where applicable. Please do not attach screenshots showing logs unless you are unable to copy and paste the log data. +- [ ] Ensure any code / output examples are [properly formatted](https://docs.github.com/en/github/writing-on-github/basic-writing-and-formatting-syntax#quoting-code) for legibility. + +Besides Pod logs, logs may also be found in the `/pgdata/pg/log` directory on your Postgres instance. + +If you are looking for [general support](https://access.crunchydata.com/documentation/postgres-operator/latest/support/), please view the [support](https://access.crunchydata.com/documentation/postgres-operator/latest/support/) page for where you can ask questions. + +### Environment + +Please provide the following details: + +- Platform: (`Kubernetes`, `OpenShift`, `Rancher`, `GKE`, `EKS`, `AKS` etc.) +- Platform Version: (e.g. `1.20.3`, `4.7.0`) +- PGO Image Tag: (e.g. `ubi8-5.x.y-0`) +- Postgres Version (e.g. `15`) +- Storage: (e.g. 
`hostpath`, `nfs`, or the name of your storage class) diff --git a/.github/actions/awk-matcher.json b/.github/actions/awk-matcher.json new file mode 100644 index 0000000000..852a723577 --- /dev/null +++ b/.github/actions/awk-matcher.json @@ -0,0 +1,13 @@ +{ + "problemMatcher": [ + { + "owner": "awk", + "pattern": [ + { + "regexp": "^([^:]+):([^ ]+) (([^:]+):.*)$", + "file": 1, "line": 2, "message": 3, "severity": 4 + } + ] + } + ] +} diff --git a/.github/actions/k3d/action.yaml b/.github/actions/k3d/action.yaml new file mode 100644 index 0000000000..395d5f1116 --- /dev/null +++ b/.github/actions/k3d/action.yaml @@ -0,0 +1,94 @@ +name: k3d +description: Start k3s using k3d +inputs: + k3d-tag: + default: latest + required: true + description: > + Git tag from https://github.com/k3d-io/k3d/releases or "latest" + k3s-channel: + default: latest + required: true + description: > + https://docs.k3s.io/upgrades/manual#release-channels + prefetch-images: + required: true + description: > + Each line is the name of an image to fetch onto all Kubernetes nodes + prefetch-timeout: + default: 90s + required: true + description: > + Amount of time to wait for images to be fetched + +outputs: + k3d-version: + value: ${{ steps.k3d.outputs.k3d }} + description: > + K3d version + kubernetes-version: + value: ${{ steps.k3s.outputs.server }} + description: > + Kubernetes server version, as reported by the Kubernetes API + pause-image: + value: ${{ steps.k3s.outputs.pause-image }} + description: > + Pause image for prefetch images DaemonSet + +runs: + using: composite + steps: + - id: k3d + name: Install k3d + shell: bash + env: + K3D_TAG: ${{ inputs.k3d-tag }} + run: | + curl --fail --silent https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | + TAG="${K3D_TAG#latest}" bash + k3d version | awk '{ print "${tolower($1)}=${$3}" >> $GITHUB_OUTPUT }' + + - id: k3s + name: Start k3s + shell: bash + run: | + k3d cluster create --image '+${{ inputs.k3s-channel }}' --no-lb --timeout=2m --wait + kubectl version | awk '{ print "${tolower($1)}=${$3}" >> $GITHUB_OUTPUT }' + + PAUSE_IMAGE=$(docker exec $(k3d node list --output json | jq --raw-output 'first.name') \ + k3s agent --help | awk '$1 == "--pause-image" { + match($0, /default: "[^"]*"/); + print substr($0, RSTART+10, RLENGTH-11) + }') + echo "pause-image=${PAUSE_IMAGE}" >> $GITHUB_OUTPUT + + - name: Prefetch container images + shell: bash + env: + INPUT_IMAGES: ${{ inputs.prefetch-images }} + INPUT_TIMEOUT: ${{ inputs.prefetch-timeout }} + run: | + jq <<< "$INPUT_IMAGES" --raw-input 'select(. 
!= "")' | + jq --slurp \ + --arg pause '${{ steps.k3s.outputs.pause-image }}' \ + --argjson labels '{"name":"image-prefetch"}' \ + --argjson name '"image-prefetch"' \ + '{ + apiVersion: "apps/v1", kind: "DaemonSet", + metadata: { name: $name, labels: $labels }, + spec: { + selector: { matchLabels: $labels }, + template: { + metadata: { labels: $labels }, + spec: { + initContainers: to_entries | map({ + name: "c\(.key)", image: .value, command: ["true"], + }), + containers: [{ name: "pause", image: $pause }] + } + } + } + }' | + kubectl create --filename=- + kubectl rollout status daemonset.apps/image-prefetch --timeout "$INPUT_TIMEOUT" || + kubectl describe daemonset.apps/image-prefetch diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..639a059edc --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,16 @@ +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/customizing-dependency-updates +# +# See: https://www.github.com/dependabot/dependabot-core/issues/4605 +--- +# yaml-language-server: $schema=https://json.schemastore.org/dependabot-2.0.json +version: 2 +updates: + - package-ecosystem: github-actions + directory: / + schedule: + interval: weekly + day: tuesday + groups: + all-github-actions: + patterns: ['*'] diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 009442f462..b03369bf09 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -4,24 +4,27 @@ - [ ] Have you added an explanation of what your changes do and why you'd like them to be included? - [ ] Have you updated or added documentation for the change, as applicable? - [ ] Have you tested your changes on all related environments with successful results, as applicable? + - [ ] Have you added automated tests? **Type of Changes:** - - [ ] Bug fix (non-breaking change which fixes an issue) - - [ ] New feature (non-breaking change which adds functionality) - - [ ] Breaking change (fix or feature that would cause existing functionality to change) + - [ ] New feature + - [ ] Bug fix + - [ ] Documentation + - [ ] Testing enhancement + - [ ] Other - -**What is the current behavior? 
(link to any open issues here)** +**What is the current behavior (link to any open issues here)?** **What is the new behavior (if this is a feature change)?** +- [ ] Breaking change (fix or feature that would cause existing functionality to change) -**Other information**: +**Other Information**: diff --git a/.github/workflows/codeql-analysis.yaml b/.github/workflows/codeql-analysis.yaml index 15e90dbe21..ae4d24d122 100644 --- a/.github/workflows/codeql-analysis.yaml +++ b/.github/workflows/codeql-analysis.yaml @@ -1,15 +1,20 @@ name: CodeQL on: + pull_request: push: branches: - - master + - main schedule: - cron: '10 18 * * 2' +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + GOTOOLCHAIN: local + jobs: analyze: - name: Analyze runs-on: ubuntu-latest permissions: actions: read @@ -19,19 +24,17 @@ jobs: if: ${{ github.repository == 'CrunchyData/postgres-operator' }} steps: - - uses: actions/checkout@v2 - - uses: actions/setup-go@v2 - with: { go-version: 1.x } + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } - name: Initialize CodeQL - uses: github/codeql-action/init@v1 + uses: github/codeql-action/init@v3 with: { languages: go } - name: Autobuild - uses: github/codeql-action/autobuild@v1 + # This action calls `make` which runs our "help" target. + uses: github/codeql-action/autobuild@v3 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 - # This action calls `make` which runs our "all" target and tries to build - # container images. 😖 That fails, but the action ignores it and proceeds. - # See "CODEQL_EXTRACTOR_GO_BUILD_COMMAND" in https://github.com/github/codeql-go + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 40ee71c8e9..c715f2a1d7 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -1,14 +1,39 @@ +name: Linters + on: pull_request: - branches: - - master + +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + GOTOOLCHAIN: local jobs: golangci-lint: runs-on: ubuntu-latest + permissions: + contents: read + checks: write steps: - - uses: actions/checkout@v2 - - uses: golangci/golangci-lint-action@v2 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } + + - uses: golangci/golangci-lint-action@v6 with: - version: v1.32 + version: latest args: --timeout=5m + + # Count issues reported by disabled linters. The command always + # exits zero to ensure it does not fail the pull request check. 
+ - name: Count non-blocking issues + run: | + golangci-lint run --config .golangci.next.yaml \ + --issues-exit-code 0 \ + --max-issues-per-linter 0 \ + --max-same-issues 0 \ + --out-format json | + jq --sort-keys 'reduce .Issues[] as $i ({}; .[$i.FromLinter] += 1)' | + awk >> "${GITHUB_STEP_SUMMARY}" ' + NR == 1 { print "```json" } { print } END { if (NR > 0) print "```" } + ' || true diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 37704764b6..e8174e4f95 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -1,36 +1,53 @@ +name: Tests + on: pull_request: - branches: - - master push: branches: - - master + - main + +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + GOTOOLCHAIN: local jobs: go-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-go@v2 - with: - go-version: 1.x + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } - run: make check - run: make check-generate + - name: Ensure go.mod is tidy + run: go mod tidy && git diff --exit-code -- go.mod + kubernetes-api: runs-on: ubuntu-latest needs: [go-test] strategy: fail-fast: false matrix: - kubernetes: [default] + kubernetes: ['default'] steps: - - uses: actions/checkout@v2 - - uses: actions/setup-go@v2 - with: { go-version: 1.x } + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } - run: go mod download - - env: { KUBERNETES: "${{ matrix.kubernetes }}" } - run: ENVTEST_K8S_VERSION="${KUBERNETES#default}" make check-envtest + - run: ENVTEST_K8S_VERSION="${KUBERNETES#default}" make check-envtest + env: + KUBERNETES: "${{ matrix.kubernetes }}" + GO_TEST: go test --coverprofile 'envtest.coverage' --coverpkg ./internal/... + + # Upload coverage to GitHub + - run: gzip envtest.coverage + - uses: actions/upload-artifact@v4 + with: + name: "~coverage~kubernetes-api=${{ matrix.kubernetes }}" + path: envtest.coverage.gz + retention-days: 1 kubernetes-k3d: if: "${{ github.repository == 'CrunchyData/postgres-operator' }}" @@ -39,14 +56,154 @@ jobs: strategy: fail-fast: false matrix: - kubernetes: [latest, v1.18] + kubernetes: [v1.31, v1.28] + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } + + - name: Start k3s + uses: ./.github/actions/k3d + with: + k3s-channel: "${{ matrix.kubernetes }}" + prefetch-images: | + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.53.1-0 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.23-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2 + + - run: make createnamespaces check-envtest-existing + env: + PGO_TEST_TIMEOUT_SCALE: 1.2 + GO_TEST: go test --coverprofile 'envtest-existing.coverage' --coverpkg ./internal/... 
+ + # Upload coverage to GitHub + - run: gzip envtest-existing.coverage + - uses: actions/upload-artifact@v4 + with: + name: "~coverage~kubernetes-k3d=${{ matrix.kubernetes }}" + path: envtest-existing.coverage.gz + retention-days: 1 + + kuttl-k3d: + runs-on: ubuntu-latest + needs: [go-test] + strategy: + fail-fast: false + matrix: + kubernetes: [v1.31, v1.30, v1.29, v1.28] + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } + + - name: Start k3s + uses: ./.github/actions/k3d + with: + k3s-channel: "${{ matrix.kubernetes }}" + prefetch-images: | + registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-31 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.53.1-0 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.23-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest + registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.3-2 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.4-2 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-17.0-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-17.0-3.4-0 + - run: go mod download + - name: Build executable + run: PGO_VERSION='${{ github.sha }}' make build-postgres-operator + + - name: Get pgMonitor files. + run: make get-pgmonitor + env: + PGMONITOR_DIR: "${{ github.workspace }}/hack/tools/pgmonitor" + QUERIES_CONFIG_DIR: "${{ github.workspace }}/hack/tools/queries" + + # Start a Docker container with the working directory mounted. 
+ - name: Start PGO + run: | + kubectl apply --server-side -k ./config/namespace + kubectl apply --server-side -k ./config/dev + hack/create-kubeconfig.sh postgres-operator pgo + docker run --detach --network host --read-only \ + --volume "$(pwd):/mnt" --workdir '/mnt' --env 'PATH=/mnt/bin' \ + --env 'CHECK_FOR_UPGRADES=false' \ + --env 'QUERIES_CONFIG_DIR=/mnt/hack/tools/queries' \ + --env 'KUBECONFIG=hack/.kube/postgres-operator/pgo' \ + --env 'RELATED_IMAGE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-31' \ + --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.53.1-0' \ + --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.23-0' \ + --env 'RELATED_IMAGE_PGEXPORTER=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest' \ + --env 'RELATED_IMAGE_PGUPGRADE=registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest' \ + --env 'RELATED_IMAGE_POSTGRES_16=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2' \ + --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.3-2' \ + --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.4-2' \ + --env 'RELATED_IMAGE_POSTGRES_17=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-17.0-0' \ + --env 'RELATED_IMAGE_POSTGRES_17_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-17.0-3.4-0' \ + --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-8.12-0' \ + --env 'PGO_FEATURE_GATES=TablespaceVolumes=true' \ + --name 'postgres-operator' ubuntu \ + postgres-operator + - name: Install kuttl + run: | + curl -Lo /usr/local/bin/kubectl-kuttl https://github.com/kudobuilder/kuttl/releases/download/v0.13.0/kubectl-kuttl_0.13.0_linux_x86_64 + chmod +x /usr/local/bin/kubectl-kuttl + + - run: make generate-kuttl + env: + KUTTL_PG_UPGRADE_FROM_VERSION: '16' + KUTTL_PG_UPGRADE_TO_VERSION: '17' + KUTTL_PG_VERSION: '16' + KUTTL_POSTGIS_VERSION: '3.4' + KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2' + - run: | + make check-kuttl && exit + failed=$? + echo '::group::PGO logs'; docker logs 'postgres-operator'; echo '::endgroup::' + exit $failed + env: + KUTTL_TEST: kubectl-kuttl test + - name: Stop PGO + run: docker stop 'postgres-operator' || true + + coverage-report: + if: ${{ success() || contains(needs.*.result, 'success') }} + runs-on: ubuntu-latest + needs: + - kubernetes-api + - kubernetes-k3d steps: - - uses: actions/checkout@v2 - - uses: actions/setup-go@v2 - with: { go-version: 1.x } - - uses: nolar/setup-k3d-k3s@v1 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } + - uses: actions/download-artifact@v4 + with: { path: download } + + # Combine the coverage profiles by taking the mode line from any one file + # and the data from all files. Write a list of functions with less than + # 100% coverage to the job summary, and upload a complete HTML report. 
+ - name: Generate report + run: | + gunzip --keep download/*/*.gz + ( sed -e '1q' download/*/*.coverage + tail -qn +2 download/*/*.coverage ) > total.coverage + go tool cover --func total.coverage -o total-coverage.txt + go tool cover --html total.coverage -o total-coverage.html + + awk < total-coverage.txt ' + END { print "
Total Coverage: " $3 " " $2 "" } + ' >> "${GITHUB_STEP_SUMMARY}" + + sed < total-coverage.txt -e '/100.0%/d' -e "s,$(go list -m)/,," | column -t | awk ' + NR == 1 { print "\n\n```" } { print } END { if (NR > 0) print "```\n\n"; print "
" } + ' >> "${GITHUB_STEP_SUMMARY}" + + # Upload coverage to GitHub + - run: gzip total-coverage.html + - uses: actions/upload-artifact@v4 with: - version: "${{ matrix.kubernetes }}" - k3d-args: --no-lb - - env: { PGO_TEST_TIMEOUT_SCALE: 1.2 } - run: make createnamespaces check-envtest-existing + name: coverage-report=html + path: total-coverage.html.gz + retention-days: 15 diff --git a/.github/workflows/trivy.yaml b/.github/workflows/trivy.yaml new file mode 100644 index 0000000000..2a16e4929c --- /dev/null +++ b/.github/workflows/trivy.yaml @@ -0,0 +1,75 @@ +name: Trivy + +on: + pull_request: + push: + branches: + - main + +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + GOTOOLCHAIN: local + +jobs: + licenses: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + # Trivy needs a populated Go module cache to detect Go module licenses. + - uses: actions/setup-go@v5 + with: { go-version: stable } + - run: go mod download + + # Report success only when detected licenses are listed in [/trivy.yaml]. + - name: Scan licenses + uses: aquasecurity/trivy-action@0.28.0 + env: + TRIVY_DEBUG: true + with: + scan-type: filesystem + scanners: license + exit-code: 1 + + vulnerabilities: + if: ${{ github.repository == 'CrunchyData/postgres-operator' }} + + permissions: + # for github/codeql-action/upload-sarif to upload SARIF results + security-events: write + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + # Run trivy and log detected and fixed vulnerabilities + # This report should match the uploaded code scan report below + # and is a convenience/redundant effort for those who prefer to + # read logs and/or if anything goes wrong with the upload. + - name: Log all detected vulnerabilities + uses: aquasecurity/trivy-action@0.28.0 + with: + scan-type: filesystem + hide-progress: true + ignore-unfixed: true + scanners: secret,vuln + + # Upload actionable results to the GitHub Security tab. + # Pull request checks fail according to repository settings. + # - https://docs.github.com/en/code-security/code-scanning/integrating-with-code-scanning/uploading-a-sarif-file-to-github + # - https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning + - name: Report actionable vulnerabilities + uses: aquasecurity/trivy-action@0.28.0 + with: + scan-type: filesystem + ignore-unfixed: true + format: 'sarif' + output: 'trivy-results.sarif' + scanners: secret,vuln + + - name: Upload Trivy scan results to GitHub Security tab + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: 'trivy-results.sarif' diff --git a/.gitignore b/.gitignore index 210f4ef69a..dcfd7074a3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ .DS_Store /vendor/ -tools +/testing/kuttl/e2e-generated*/ +gke_gcloud_auth_plugin_cache diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index b8907ec067..0000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "hugo/themes/crunchy-hugo-theme"] - path = docs/themes/crunchy-hugo-theme - url = https://github.com/crunchydata/crunchy-hugo-theme diff --git a/.golangci.next.yaml b/.golangci.next.yaml new file mode 100644 index 0000000000..95b3f63347 --- /dev/null +++ b/.golangci.next.yaml @@ -0,0 +1,40 @@ +# https://golangci-lint.run/usage/configuration/ +# +# This file is for linters that might be interesting to enforce in the future. 
+# Rules that should be enforced immediately belong in [.golangci.yaml]. +# +# Both files are used by [.github/workflows/lint.yaml]. + +linters: + disable-all: true + enable: + - contextcheck + - err113 + - errchkjson + - gocritic + - godot + - godox + - gofumpt + - gosec # exclude-use-default + - nilnil + - nolintlint + - predeclared + - revive + - staticcheck # exclude-use-default + - tenv + - thelper + - tparallel + - wastedassign + +issues: + # https://github.com/golangci/golangci-lint/issues/2239 + exclude-use-default: false + +linters-settings: + errchkjson: + check-error-free-encoding: true + + thelper: + # https://github.com/kulti/thelper/issues/27 + tb: { begin: true, first: true } + test: { begin: true, first: true, name: true } diff --git a/.golangci.yaml b/.golangci.yaml index 8841b518ee..87a6ed0464 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -2,20 +2,60 @@ linters: disable: + - contextcheck + - errchkjson + - gci - gofumpt - - scopelint enable: + - depguard + - goheader - gomodguard - gosimple + - importas - misspell + - unconvert presets: - bugs - format - unused linters-settings: + depguard: + rules: + everything: + deny: + - pkg: io/ioutil + desc: > + Use the "io" and "os" packages instead. + See https://go.dev/doc/go1.16#ioutil + + not-tests: + files: ['!$test'] + deny: + - pkg: net/http/httptest + desc: Should be used only in tests. + + - pkg: testing/* + desc: The "testing" packages should be used only in tests. + + - pkg: github.com/crunchydata/postgres-operator/internal/testing/* + desc: The "internal/testing" packages should be used only in tests. + exhaustive: default-signifies-exhaustive: true + + goheader: + template: |- + Copyright {{ DATES }} Crunchy Data Solutions, Inc. + + SPDX-License-Identifier: Apache-2.0 + values: + regexp: + DATES: '((201[7-9]|202[0-3]) - 2024|2024)' + + goimports: + local-prefixes: github.com/crunchydata/postgres-operator + gomodguard: blocked: modules: @@ -27,8 +67,21 @@ linters-settings: k8s.io/kubernetes is for managing dependencies of the Kubernetes project, i.e. building kubelet and kubeadm. -run: - build-tags: - - envtest - skip-dirs: + gosec: + excludes: + # Flags for potentially-unsafe casting of ints, similar problem to globally-disabled G103 + - G115 + + importas: + alias: + - pkg: k8s.io/api/(\w+)/(v[\w\w]+) + alias: $1$2 + - pkg: k8s.io/apimachinery/pkg/apis/(\w+)/(v[\w\d]+) + alias: $1$2 + - pkg: k8s.io/apimachinery/pkg/api/errors + alias: apierrors + no-unaliased: true + +issues: + exclude-dirs: - pkg/generated diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d4aa43bc59..e209f4e5a7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,15 +13,11 @@ Thanks! We look forward to your contribution. # General Contributing Guidelines All ongoing development for an upcoming release gets committed to the -**`master`** branch. The `master` branch technically serves as the "development" -branch as well, but all code that is committed to the `master` branch should be +**`main`** branch. The `main` branch technically serves as the "development" +branch as well, but all code that is committed to the `main` branch should be considered _stable_, even if it is part of an ongoing release cycle. -All fixes for a supported release should be committed to the supported release -branch. For example, the 4.3 release is maintained on the `REL_4_3` branch. -Please see the section on _Supported Releases_ for more information. - -Ensure any changes are clear and well-documented. 
When we say "well-documented": +Ensure any changes are clear and well-documented: - If the changes include code, ensure all additional code has corresponding documentation in and around it. This includes documenting the definition of @@ -32,10 +28,7 @@ summarize how. Avoid simply repeating details from declarations,. When in doubt, favor overexplaining to underexplaining. - Code comments should be consistent with their language conventions. For -example, please use GoDoc conventions for Go source code. - -- Any new features must have corresponding user documentation. Any removed -features must have their user documentation removed from the documents. +example, please use `gofmt` [conventions](https://go.dev/doc/comment) for Go source code. - Do not submit commented-out code. If the code does not need to be used anymore, please remove it. @@ -62,12 +55,7 @@ All commits must either be rebased in atomic order or squashed (if the squashed commit is considered atomic). Merge commits are not accepted. All conflicts must be resolved prior to pushing changes. -**All pull requests should be made from the `master` branch** unless it is a fix -for a specific supported release. - -Once a major or minor release is made, no new features are added into the -release branch, only bug fixes. Any new features are added to the `master` -branch until the time that said new features are released. +**All pull requests should be made from the `main` branch.** # Commit Messages @@ -86,12 +74,11 @@ possible as to what the changes are. Good things to include: understand. ``` -If you wish to tag a Github issue or another project management tracker, please +If you wish to tag a GitHub issue or another project management tracker, please do so at the bottom of the commit message, and make it clearly labeled like so: ``` -Issue: #123 -Issue: [ch1234] +Issue: CrunchyData/postgres-operator#123 ``` # Submitting Pull Requests @@ -100,102 +87,23 @@ All work should be made in your own repository fork. When you believe your work is ready to be committed, please follow the guidance below for creating a pull request. -## Upcoming Releases / Features - -Ongoing work for new features should occur in branches off of the `master` -branch. It is suggested, but not required, that the branch name should reflect -that this is for an upcoming release, i.e. `upstream/branch-name` where the -`branch-name` is something descriptive for what you're working on. - -## Supported Releases / Fixes - -While not required, it is recommended to make your branch name along the lines -of: `REL_X_Y/branch-name` where the `branch-name` is something descriptive -for what you're working on. - -# Releases & Versioning - -Overall, release tags attempt to follow the -[semantic versioning](https://semver.org) scheme. - -"Supported releases" (described in the next section) occur on "minor" release -branches (e.g. the `x.y` portion of the `x.y.z`). - -One or more "patch" releases can occur after a minor release. A patch release is -used to fix bugs and other issues that may be found after a supported release. - -Fixes found on the `master` branch can be backported to a support release -branch. Any fixes for a supported release must have a pull request off of the -supported release branch, which is detailed below. - -## Supported Releases +## Upcoming Features -When a "minor" release is made, the release is stamped using the `vx.y.0` format -as denoted above, and a branch is created with the name `REL_X_Y`. 
Once a -minor release occurs, no new features are added to the `REL_X_Y` branch. -However, bug fixes can (and if found, should) be added to this branch. +Ongoing work for new features should occur in branches off of the `main` +branch. -To contribute a bug fix to a supported release, please make a pull request off -of the supported release branch. For instance, if you find a bug in the 4.3 -release, then you would make a pull request off of the `REL_4_3` branch. +## Unsupported Branches -## Unsupported Releases - -When a release is no longer supported, the branch will be renamed following the +When a release branch is no longer supported, it will be renamed following the pattern `REL_X_Y_FINAL` with the key suffix being _FINAL_. For example, `REL_3_2_FINAL` indicates that the 3.2 release is no longer supported. Nothing should ever be pushed to a `REL_X_Y_FINAL` branch once `FINAL` is on the branch name. -## Alpha, Beta, Release Candidate Releases - -At any point in the release cycle for a new release, there could exist one or -more alpha, beta, or release candidate (RC) release. Alpha, beta, and release -candidates **should not be used in production environments**. - -Alpha is the early stage of a release cycle and is typically made to test the -mechanics of an upcoming release. These should be considered relatively -unstable. The format for an alpha release tag is `v4.3.0-alpha.1`, which in this -case indicates it is the first alpha release for 4.3. - -Beta occurs during the later stage of a release cycle. At this point, the -release should be considered feature complete and the beta is used to -distribute, test, and collect feedback on the upcoming release. The betas should -be considered unstable, but as mentioned feature complete. The format for an -beta release tag is `v4.3.0-beta.1`, which in this case indicates it is the -first beta release for 4.3. - -Release candidates (RCs) occur just before a release. A release candidate should -be considered stable, and is typically used for a final round of bug checking -and testing. Multiple release candidates can occur in the event of serious bugs. -The format for a release candidate tag is `v4.3.0-rc.1`, which in this -case indicates it is the first release candidate for 4.3. - -**After a major or minor release, no alpha, beta, or release candidate releases -are supported**. In fact, any newer release of an alpha, beta, or RC immediately -deprecates any older alpha, beta or RC. (Naturally, a beta deprecates an alpha, -and a RC deprecates a beta). - -If you are testing on an older alpha, beta or RC, bug reports will not be -accepted. Please ensure you are testing on the latest version. - # Testing -We greatly appreciate any and all testing for the project. When testing, please -be sure you do the following: - -- If testing against a release, ensure your tests are performed against the -latest minor version (the last number in the release denotes the minor version, -e.g. the "3" in the 4.3.3) -- If testing against a pre-release (alpha, beta, RC), ensure your tests are -performed against latest version -- If testing against a development (`master`) or release (`REL_X_Y`) branch, -ensure your tests are performed against the latest commit - -Please do not test against unsupported versions (e.g. any release that is marked -final). - +We greatly appreciate any and all testing for the project. 
There are several ways to help with the testing effort: - Manual testing: testing particular features with a series of manual commands diff --git a/LICENSE.md b/LICENSE.md index f8ebe3dacd..8d57ad6f2e 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2017 - 2021 Crunchy Data Solutions, Inc. + Copyright 2017 - 2024 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/Makefile b/Makefile index 914fb02d63..37aca1a37e 100644 --- a/Makefile +++ b/Makefile @@ -1,279 +1,336 @@ +PGO_IMAGE_NAME ?= postgres-operator +PGO_IMAGE_MAINTAINER ?= Crunchy Data +PGO_IMAGE_SUMMARY ?= Crunchy PostgreSQL Operator +PGO_IMAGE_DESCRIPTION ?= $(PGO_IMAGE_SUMMARY) +PGO_IMAGE_URL ?= https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes +PGO_IMAGE_PREFIX ?= localhost -# Default values if not already set -ANSIBLE_VERSION ?= 2.9.* -PGOROOT ?= $(CURDIR) -PGO_BASEOS ?= centos8 -BASE_IMAGE_OS ?= $(PGO_BASEOS) -PGO_IMAGE_PREFIX ?= crunchydata -PGO_IMAGE_TAG ?= $(PGO_BASEOS)-$(PGO_VERSION) -PGO_VERSION ?= $(shell git describe --tags) -PGO_PG_VERSION ?= 13 -PGO_PG_FULLVERSION ?= 13.4 -PGO_BACKREST_VERSION ?= 2.33 -PGO_KUBE_CLIENT ?= kubectl -PACKAGER ?= yum - -RELTMPDIR=/tmp/release.$(PGO_VERSION) -RELFILE=/tmp/postgres-operator.$(PGO_VERSION).tar.gz - -# Valid values: buildah (default), docker -IMGBUILDER ?= buildah -# Determines whether or not rootless builds are enabled -IMG_ROOTLESS_BUILD ?= false -# The utility to use when pushing/pulling to and from an image repo (e.g. docker or buildah) -IMG_PUSHER_PULLER ?= docker -# Determines whether or not images should be pushed to the local docker daemon when building with -# a tool other than docker (e.g. when building with buildah) -IMG_PUSH_TO_DOCKER_DAEMON ?= true -# Defines the sudo command that should be prepended to various build commands when rootless builds are -# not enabled -IMGCMDSUDO= -ifneq ("$(IMG_ROOTLESS_BUILD)", "true") - IMGCMDSUDO=sudo --preserve-env -endif -IMGCMDSTEM=$(IMGCMDSUDO) buildah bud --layers $(SQUASH) -DFSET=$(PGO_BASEOS) - -# Default the buildah format to docker to ensure it is possible to pull the images from a docker -# repository using docker (otherwise the images may not be recognized) -export BUILDAH_FORMAT ?= docker - -# Allows simplification of IMGBUILDER switching -ifeq ("$(IMGBUILDER)","docker") - IMGCMDSTEM=docker build -endif - -# set the proper packager, registry and base image based on the PGO_BASEOS configured -DOCKERBASEREGISTRY= -BASE_IMAGE_OS= -ifeq ("$(PGO_BASEOS)", "ubi8") - BASE_IMAGE_OS=ubi8-minimal - DFSET=rhel - DOCKERBASEREGISTRY=registry.access.redhat.com/ - PACKAGER=microdnf -endif -ifeq ("$(PGO_BASEOS)", "centos8") - BASE_IMAGE_OS=centos8 - DFSET=centos - DOCKERBASEREGISTRY=centos: - PACKAGER=dnf -endif - -DEBUG_BUILD ?= false -GO ?= go -GO_BUILD = $(GO_CMD) build -trimpath -GO_CMD = $(GO_ENV) $(GO) -GO_TEST ?= $(GO) test - -# Disable optimizations if creating a debug build -ifeq ("$(DEBUG_BUILD)", "true") - GO_BUILD = $(GO_CMD) build -gcflags='all=-N -l' -endif - -# To build a specific image, run 'make -image' (e.g. 
'make postgres-operator-image') -images = postgres-operator \ - crunchy-postgres-exporter - -.PHONY: all setup clean push pull release deploy - - -#======= Main functions ======= -all: $(images:%=%-image) - -setup: - PGOROOT='$(PGOROOT)' ./bin/get-deps.sh - ./bin/check-deps.sh - -#=== postgrescluster CRD === +PGMONITOR_DIR ?= hack/tools/pgmonitor +PGMONITOR_VERSION ?= v5.1.1 +QUERIES_CONFIG_DIR ?= hack/tools/queries -# Create operator and target namespaces -createnamespaces: - $(PGO_KUBE_CLIENT) apply -k ./config/namespace +EXTERNAL_SNAPSHOTTER_DIR ?= hack/tools/external-snapshotter +EXTERNAL_SNAPSHOTTER_VERSION ?= v8.0.1 -# Delete operator and target namespaces -deletenamespaces: - $(PGO_KUBE_CLIENT) delete -k ./config/namespace +# Buildah's "build" used to be "bud". Use the alias to be compatible for a while. +BUILDAH_BUILD ?= buildah bud -# Install the postgrescluster CRD -install: - $(PGO_KUBE_CLIENT) apply -k ./config/crd - -# Delete the postgrescluster CRD -uninstall: - $(PGO_KUBE_CLIENT) delete -k ./config/crd - -# Deploy the PostgreSQL Operator (enables the postgrescluster controller) -deploy: - $(PGO_KUBE_CLIENT) apply -k ./config/default +GO ?= go +GO_BUILD = $(GO) build +GO_TEST ?= $(GO) test +KUTTL ?= kubectl-kuttl +KUTTL_TEST ?= $(KUTTL) test + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-formatting the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +.PHONY: all +all: ## Build all images +all: build-postgres-operator-image + +.PHONY: setup +setup: ## Run Setup needed to build images +setup: get-pgmonitor + +.PHONY: get-pgmonitor +get-pgmonitor: + git -C '$(dir $(PGMONITOR_DIR))' clone https://github.com/CrunchyData/pgmonitor.git || git -C '$(PGMONITOR_DIR)' fetch origin + @git -C '$(PGMONITOR_DIR)' checkout '$(PGMONITOR_VERSION)' + @git -C '$(PGMONITOR_DIR)' config pull.ff only + [ -d '${QUERIES_CONFIG_DIR}' ] || mkdir -p '${QUERIES_CONFIG_DIR}' + cp -r '$(PGMONITOR_DIR)/postgres_exporter/common/.' '${QUERIES_CONFIG_DIR}' + cp '$(PGMONITOR_DIR)/postgres_exporter/linux/queries_backrest.yml' '${QUERIES_CONFIG_DIR}' + +.PHONY: get-external-snapshotter +get-external-snapshotter: + git -C '$(dir $(EXTERNAL_SNAPSHOTTER_DIR))' clone https://github.com/kubernetes-csi/external-snapshotter.git || git -C '$(EXTERNAL_SNAPSHOTTER_DIR)' fetch origin + @git -C '$(EXTERNAL_SNAPSHOTTER_DIR)' checkout '$(EXTERNAL_SNAPSHOTTER_VERSION)' + @git -C '$(EXTERNAL_SNAPSHOTTER_DIR)' config pull.ff only + +.PHONY: clean +clean: ## Clean resources +clean: clean-deprecated + rm -f bin/postgres-operator + rm -rf licenses/*/ + [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated + [ ! 
-d testing/kuttl/e2e-generated-other ] || rm -r testing/kuttl/e2e-generated-other + [ ! -f hack/tools/setup-envtest ] || rm hack/tools/setup-envtest + [ ! -d hack/tools/envtest ] || { chmod -R u+w hack/tools/envtest && rm -r hack/tools/envtest; } + [ ! -d hack/tools/pgmonitor ] || rm -rf hack/tools/pgmonitor + [ ! -d hack/tools/external-snapshotter ] || rm -rf hack/tools/external-snapshotter + [ ! -n "$$(ls hack/tools)" ] || rm -r hack/tools/* + [ ! -d hack/.kube ] || rm -r hack/.kube -# Deploy the PostgreSQL Operator locally +.PHONY: clean-deprecated +clean-deprecated: ## Clean deprecated resources + @# packages used to be downloaded into the vendor directory + [ ! -d vendor ] || rm -r vendor + @# executables used to be compiled into the $GOBIN directory + [ ! -n '$(GOBIN)' ] || rm -f $(GOBIN)/postgres-operator $(GOBIN)/apiserver $(GOBIN)/*pgo + @# executables used to be in subdirectories + [ ! -d bin/pgo-rmdata ] || rm -r bin/pgo-rmdata + [ ! -d bin/pgo-backrest ] || rm -r bin/pgo-backrest + [ ! -d bin/pgo-scheduler ] || rm -r bin/pgo-scheduler + [ ! -d bin/postgres-operator ] || rm -r bin/postgres-operator + @# keys used to be generated before install + [ ! -d conf/pgo-backrest-repo ] || rm -r conf/pgo-backrest-repo + [ ! -d conf/postgres-operator ] || rm -r conf/postgres-operator + @# crunchy-postgres-exporter used to live in this repo + [ ! -d bin/crunchy-postgres-exporter ] || rm -r bin/crunchy-postgres-exporter + [ ! -d build/crunchy-postgres-exporter ] || rm -r build/crunchy-postgres-exporter + @# CRDs used to require patching + [ ! -d build/crd ] || rm -r build/crd + + +##@ Deployment +.PHONY: createnamespaces +createnamespaces: ## Create operator and target namespaces + kubectl apply -k ./config/namespace + +.PHONY: deletenamespaces +deletenamespaces: ## Delete operator and target namespaces + kubectl delete -k ./config/namespace + +.PHONY: install +install: ## Install the postgrescluster CRD + kubectl apply --server-side -k ./config/crd + +.PHONY: uninstall +uninstall: ## Delete the postgrescluster CRD + kubectl delete -k ./config/crd + +.PHONY: deploy +deploy: ## Deploy the PostgreSQL Operator (enables the postgrescluster controller) + kubectl apply --server-side -k ./config/default + +.PHONY: undeploy +undeploy: ## Undeploy the PostgreSQL Operator + kubectl delete -k ./config/default + +.PHONY: deploy-dev +deploy-dev: ## Deploy the PostgreSQL Operator locally +deploy-dev: PGO_FEATURE_GATES ?= "TablespaceVolumes=true,VolumeSnapshots=true" +deploy-dev: get-pgmonitor deploy-dev: build-postgres-operator - $(PGO_KUBE_CLIENT) apply -k ./config/dev +deploy-dev: createnamespaces + kubectl apply --server-side -k ./config/dev hack/create-kubeconfig.sh postgres-operator pgo env \ + QUERIES_CONFIG_DIR="${QUERIES_CONFIG_DIR}" \ CRUNCHY_DEBUG=true \ + PGO_FEATURE_GATES="${PGO_FEATURE_GATES}" \ + CHECK_FOR_UPGRADES='$(if $(CHECK_FOR_UPGRADES),$(CHECK_FOR_UPGRADES),false)' \ KUBECONFIG=hack/.kube/postgres-operator/pgo \ - $(shell $(PGO_KUBE_CLIENT) kustomize ./config/dev | \ + PGO_NAMESPACE='postgres-operator' \ + PGO_INSTALLER='deploy-dev' \ + PGO_INSTALLER_ORIGIN='postgres-operator-repo' \ + BUILD_SOURCE='build-postgres-operator' \ + $(shell kubectl kustomize ./config/dev | \ sed -ne '/^kind: Deployment/,/^---/ { \ /RELATED_IMAGE_/ { N; s,.*\(RELATED_[^[:space:]]*\).*value:[[:space:]]*\([^[:space:]]*\),\1="\2",; p; }; \ }') \ $(foreach v,$(filter RELATED_IMAGE_%,$(.VARIABLES)),$(v)="$($(v))") \ bin/postgres-operator -# Undeploy the PostgreSQL Operator -undeploy: - $(PGO_KUBE_CLIENT) delete 
-k ./config/default - - -#======= Binary builds ======= -build-postgres-operator: - $(GO_BUILD) -ldflags '-X "main.versionString=$(PGO_VERSION)"' \ - -o bin/postgres-operator ./cmd/postgres-operator - -build-pgo-%: - $(info No binary build needed for $@) - -build-crunchy-postgres-exporter: - $(info No binary build needed for $@) - - -#======= Image builds ======= -$(PGOROOT)/build/%/Dockerfile: - $(error No Dockerfile found for $* naming pattern: [$@]) - -%-img-build: pgo-base-$(IMGBUILDER) build-% $(PGOROOT)/build/%/Dockerfile - $(IMGCMDSTEM) \ - -f $(PGOROOT)/build/$*/Dockerfile \ - -t $(PGO_IMAGE_PREFIX)/$*:$(PGO_IMAGE_TAG) \ - --build-arg BASEOS=$(PGO_BASEOS) \ - --build-arg BASEVER=$(PGO_VERSION) \ - --build-arg DFSET=$(DFSET) \ - --build-arg PACKAGER=$(PACKAGER) \ - --build-arg PGVERSION=$(PGO_PG_VERSION) \ - --build-arg PREFIX=$(PGO_IMAGE_PREFIX) \ - $(PGOROOT) - -%-img-buildah: %-img-build ; -# only push to docker daemon if variable PGO_PUSH_TO_DOCKER_DAEMON is set to "true" -ifeq ("$(IMG_PUSH_TO_DOCKER_DAEMON)", "true") - $(IMGCMDSUDO) buildah push $(PGO_IMAGE_PREFIX)/$*:$(PGO_IMAGE_TAG) docker-daemon:$(PGO_IMAGE_PREFIX)/$*:$(PGO_IMAGE_TAG) -endif - -%-img-docker: %-img-build ; - -%-image: %-img-$(IMGBUILDER) ; - -pgo-base: pgo-base-$(IMGBUILDER) - -pgo-base-build: $(PGOROOT)/build/pgo-base/Dockerfile licenses - $(IMGCMDSTEM) \ - -f $(PGOROOT)/build/pgo-base/Dockerfile \ - -t $(PGO_IMAGE_PREFIX)/pgo-base:$(PGO_IMAGE_TAG) \ - --build-arg BASE_IMAGE_OS=$(BASE_IMAGE_OS) \ - --build-arg BASEOS=$(PGO_BASEOS) \ - --build-arg RELVER=$(PGO_VERSION) \ - --build-arg DOCKERBASEREGISTRY=$(DOCKERBASEREGISTRY) \ - --build-arg PACKAGER=$(PACKAGER) \ - --build-arg PG_FULL=$(PGO_PG_FULLVERSION) \ - --build-arg PGVERSION=$(PGO_PG_VERSION) \ - $(PGOROOT) - -pgo-base-buildah: pgo-base-build ; -# only push to docker daemon if variable PGO_PUSH_TO_DOCKER_DAEMON is set to "true" -ifeq ("$(IMG_PUSH_TO_DOCKER_DAEMON)", "true") - $(IMGCMDSUDO) buildah push $(PGO_IMAGE_PREFIX)/pgo-base:$(PGO_IMAGE_TAG) docker-daemon:$(PGO_IMAGE_PREFIX)/pgo-base:$(PGO_IMAGE_TAG) -endif - -pgo-base-docker: pgo-base-build - - -#======== Utility ======= +##@ Build - Binary +.PHONY: build-postgres-operator +build-postgres-operator: ## Build the postgres-operator binary + CGO_ENABLED=1 $(GO_BUILD) $(\ + ) --ldflags '-X "main.versionString=$(PGO_VERSION)"' $(\ + ) --trimpath -o bin/postgres-operator ./cmd/postgres-operator + +##@ Build - Images +.PHONY: build-postgres-operator-image +build-postgres-operator-image: ## Build the postgres-operator image +build-postgres-operator-image: PGO_IMAGE_REVISION := $(shell git rev-parse HEAD) +build-postgres-operator-image: PGO_IMAGE_TIMESTAMP := $(shell date -u +%FT%TZ) +build-postgres-operator-image: build-postgres-operator +build-postgres-operator-image: build/postgres-operator/Dockerfile + $(if $(shell (echo 'buildah version 1.24'; $(word 1,$(BUILDAH_BUILD)) --version) | sort -Vc 2>&1), \ + $(warning WARNING: old buildah does not invalidate its cache for changed labels: \ + https://github.com/containers/buildah/issues/3517)) + $(if $(IMAGE_TAG),, $(error missing IMAGE_TAG)) + $(strip $(BUILDAH_BUILD)) \ + --tag $(BUILDAH_TRANSPORT)$(PGO_IMAGE_PREFIX)/$(PGO_IMAGE_NAME):$(IMAGE_TAG) \ + --label name='$(PGO_IMAGE_NAME)' \ + --label build-date='$(PGO_IMAGE_TIMESTAMP)' \ + --label description='$(PGO_IMAGE_DESCRIPTION)' \ + --label maintainer='$(PGO_IMAGE_MAINTAINER)' \ + --label summary='$(PGO_IMAGE_SUMMARY)' \ + --label url='$(PGO_IMAGE_URL)' \ + --label vcs-ref='$(PGO_IMAGE_REVISION)' \ + 
--label vendor='$(PGO_IMAGE_MAINTAINER)' \ + --label io.k8s.display-name='$(PGO_IMAGE_NAME)' \ + --label io.k8s.description='$(PGO_IMAGE_DESCRIPTION)' \ + --label io.openshift.tags="postgresql,postgres,sql,nosql,crunchy" \ + --annotation org.opencontainers.image.authors='$(PGO_IMAGE_MAINTAINER)' \ + --annotation org.opencontainers.image.vendor='$(PGO_IMAGE_MAINTAINER)' \ + --annotation org.opencontainers.image.created='$(PGO_IMAGE_TIMESTAMP)' \ + --annotation org.opencontainers.image.description='$(PGO_IMAGE_DESCRIPTION)' \ + --annotation org.opencontainers.image.revision='$(PGO_IMAGE_REVISION)' \ + --annotation org.opencontainers.image.title='$(PGO_IMAGE_SUMMARY)' \ + --annotation org.opencontainers.image.url='$(PGO_IMAGE_URL)' \ + $(if $(PGO_VERSION),$(strip \ + --label release='$(PGO_VERSION)' \ + --label version='$(PGO_VERSION)' \ + --annotation org.opencontainers.image.version='$(PGO_VERSION)' \ + )) \ + --file $< --format docker --layers . + +##@ Test .PHONY: check -check: - $(GO_TEST) -cover ./... +check: ## Run basic go tests with coverage output +check: get-pgmonitor + QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" $(GO_TEST) -cover ./... +# Available versions: curl -s 'https://storage.googleapis.com/kubebuilder-tools/' | grep -o '[^<]*' # - KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT=true .PHONY: check-envtest -check-envtest: hack/tools/envtest - KUBEBUILDER_ASSETS="$(CURDIR)/$^/bin" $(GO_TEST) -count=1 -cover -tags=envtest ./... - -# - PGO_TEST_TIMEOUT_SCALE=1 +check-envtest: ## Run check using envtest and a mock kube api +check-envtest: ENVTEST_USE = $(ENVTEST) --bin-dir=$(CURDIR)/hack/tools/envtest use $(ENVTEST_K8S_VERSION) +check-envtest: SHELL = bash +check-envtest: get-pgmonitor tools/setup-envtest get-external-snapshotter + @$(ENVTEST_USE) --print=overview && echo + source <($(ENVTEST_USE) --print=env) && PGO_NAMESPACE="postgres-operator" QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" \ + $(GO_TEST) -count=1 -cover ./... + +# The "PGO_TEST_TIMEOUT_SCALE" environment variable (default: 1) can be set to a +# positive number that extends test timeouts. The following runs tests with +# timeouts that are 20% longer than normal: +# make check-envtest-existing PGO_TEST_TIMEOUT_SCALE=1.2 .PHONY: check-envtest-existing -check-envtest-existing: - ${PGO_KUBE_CLIENT} apply -k ./config/dev - USE_EXISTING_CLUSTER=true $(GO_TEST) -count=1 -cover -tags=envtest ./... - ${PGO_KUBE_CLIENT} delete -k ./config/dev - +check-envtest-existing: ## Run check using envtest and an existing kube api +check-envtest-existing: get-pgmonitor get-external-snapshotter +check-envtest-existing: createnamespaces + kubectl apply --server-side -k ./config/dev + USE_EXISTING_CLUSTER=true PGO_NAMESPACE="postgres-operator" QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" \ + $(GO_TEST) -count=1 -cover -p=1 ./... 
+ kubectl delete -k ./config/dev + +# Expects operator to be running +.PHONY: check-kuttl +check-kuttl: ## Run kuttl end-to-end tests +check-kuttl: ## example command: make check-kuttl KUTTL_TEST=' + ${KUTTL_TEST} \ + --config testing/kuttl/kuttl-test.yaml + +.PHONY: generate-kuttl +generate-kuttl: export KUTTL_PG_UPGRADE_FROM_VERSION ?= 15 +generate-kuttl: export KUTTL_PG_UPGRADE_TO_VERSION ?= 16 +generate-kuttl: export KUTTL_PG_VERSION ?= 16 +generate-kuttl: export KUTTL_POSTGIS_VERSION ?= 3.4 +generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-1 +generate-kuttl: export KUTTL_TEST_DELETE_NAMESPACE ?= kuttl-test-delete-namespace +generate-kuttl: ## Generate kuttl tests + [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated + [ ! -d testing/kuttl/e2e-generated-other ] || rm -r testing/kuttl/e2e-generated-other + bash -ceu ' \ + case $(KUTTL_PG_VERSION) in \ + 16 ) export KUTTL_BITNAMI_IMAGE_TAG=16.0.0-debian-11-r3 ;; \ + 15 ) export KUTTL_BITNAMI_IMAGE_TAG=15.0.0-debian-11-r4 ;; \ + 14 ) export KUTTL_BITNAMI_IMAGE_TAG=14.5.0-debian-11-r37 ;; \ + 13 ) export KUTTL_BITNAMI_IMAGE_TAG=13.8.0-debian-11-r39 ;; \ + 12 ) export KUTTL_BITNAMI_IMAGE_TAG=12.12.0-debian-11-r40 ;; \ + esac; \ + render() { envsubst '"'"' \ + $$KUTTL_PG_UPGRADE_FROM_VERSION $$KUTTL_PG_UPGRADE_TO_VERSION \ + $$KUTTL_PG_VERSION $$KUTTL_POSTGIS_VERSION $$KUTTL_PSQL_IMAGE \ + $$KUTTL_BITNAMI_IMAGE_TAG $$KUTTL_TEST_DELETE_NAMESPACE'"'"'; }; \ + while [ $$# -gt 0 ]; do \ + source="$${1}" target="$${1/e2e/e2e-generated}"; \ + mkdir -p "$${target%/*}"; render < "$${source}" > "$${target}"; \ + shift; \ + done' - testing/kuttl/e2e/*/*.yaml testing/kuttl/e2e-other/*/*.yaml testing/kuttl/e2e/*/*/*.yaml testing/kuttl/e2e-other/*/*/*.yaml + +##@ Generate .PHONY: check-generate -check-generate: generate-crd generate-deepcopy generate-rbac +check-generate: ## Check crd, deepcopy functions, and rbac generation +check-generate: generate-crd +check-generate: generate-deepcopy +check-generate: generate-rbac git diff --exit-code -- config/crd git diff --exit-code -- config/rbac git diff --exit-code -- pkg/apis -clean: clean-deprecated - rm -f bin/postgres-operator - rm -f config/rbac/role.yaml - [ ! -d hack/tools/envtest ] || rm -r hack/tools/envtest - [ ! -n "$$(ls hack/tools)" ] || rm hack/tools/* - [ ! -d hack/.kube ] || rm -r hack/.kube - -clean-deprecated: - @# packages used to be downloaded into the vendor directory - [ ! -d vendor ] || rm -r vendor - @# executables used to be compiled into the $GOBIN directory - [ ! -n '$(GOBIN)' ] || rm -f $(GOBIN)/postgres-operator $(GOBIN)/apiserver $(GOBIN)/*pgo - @# executables used to be in subdirectories - [ ! -d bin/pgo-rmdata ] || rm -r bin/pgo-rmdata - [ ! -d bin/pgo-backrest ] || rm -r bin/pgo-backrest - [ ! -d bin/pgo-scheduler ] || rm -r bin/pgo-scheduler - [ ! -d bin/postgres-operator ] || rm -r bin/postgres-operator - @# keys used to be generated before install - [ ! -d conf/pgo-backrest-repo ] || rm -r conf/pgo-backrest-repo - [ ! -d conf/postgres-operator ] || rm -r conf/postgres-operator - -push: $(images:%=push-%) ; +.PHONY: generate +generate: ## Generate crd, deepcopy functions, and rbac +generate: generate-crd +generate: generate-deepcopy +generate: generate-rbac + +.PHONY: generate-crd +generate-crd: ## Generate Custom Resource Definitions (CRDs) +generate-crd: tools/controller-gen + $(CONTROLLER) \ + crd:crdVersions='v1' \ + paths='./pkg/apis/...' 
\ + output:dir='config/crd/bases' # {directory}/{group}_{plural}.yaml + +.PHONY: generate-deepcopy +generate-deepcopy: ## Generate DeepCopy functions +generate-deepcopy: tools/controller-gen + $(CONTROLLER) \ + object:headerFile='hack/boilerplate.go.txt' \ + paths='./pkg/apis/postgres-operator.crunchydata.com/...' -push-%: - $(IMG_PUSHER_PULLER) push $(PGO_IMAGE_PREFIX)/$*:$(PGO_IMAGE_TAG) +.PHONY: generate-rbac +generate-rbac: ## Generate RBAC +generate-rbac: tools/controller-gen + $(CONTROLLER) \ + rbac:roleName='postgres-operator' \ + paths='./cmd/...' paths='./internal/...' \ + output:dir='config/rbac' # {directory}/role.yaml -pull: $(images:%=pull-%) ; +##@ Tools -pull-%: - $(IMG_PUSHER_PULLER) pull $(PGO_IMAGE_PREFIX)/$*:$(PGO_IMAGE_TAG) +.PHONY: tools +tools: ## Download tools like controller-gen and kustomize if necessary. -generate: generate-crd generate-crd-docs generate-deepcopy generate-rbac +# go-get-tool will 'go install' any package $2 and install it to $1. +define go-get-tool +@[ -f '$(1)' ] || { echo Downloading '$(2)'; GOBIN='$(abspath $(dir $(1)))' $(GO) install '$(2)'; } +endef -generate-crd: - GOBIN='$(CURDIR)/hack/tools' ./hack/controller-generator.sh \ - crd:crdVersions='v1',preserveUnknownFields='false' \ - paths='./pkg/apis/postgres-operator.crunchydata.com/...' \ - output:dir='config/crd/bases' # config/crd/bases/{group}_{plural}.yaml +CONTROLLER ?= hack/tools/controller-gen +tools: tools/controller-gen +tools/controller-gen: + $(call go-get-tool,$(CONTROLLER),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.4) -# TODO(cbandy): Run config/crd through Kustomize to pickup any patches there. -generate-crd-docs: - GOBIN='$(CURDIR)/hack/tools' go install fybrik.io/crdoc@v0.4.0 - $(CURDIR)/hack/tools/crdoc \ - --resources $(CURDIR)/config/crd/bases \ - --output $(CURDIR)/docs/content/references/crd.md \ - --template $(CURDIR)/hack/api-template.tmpl +ENVTEST ?= hack/tools/setup-envtest +tools: tools/setup-envtest +tools/setup-envtest: + $(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest) -generate-deepcopy: - GOBIN='$(CURDIR)/hack/tools' ./hack/controller-generator.sh \ - object:headerFile='hack/boilerplate.go.txt' \ - paths='./pkg/apis/postgres-operator.crunchydata.com/...' - -generate-rbac: - GOBIN='$(CURDIR)/hack/tools' ./hack/generate-rbac.sh \ - './internal/...' 'config/rbac' - -# Available versions: curl -s 'https://storage.googleapis.com/kubebuilder-tools/' | grep -o '[^<]*' -# - ENVTEST_K8S_VERSION=1.19.2 -hack/tools/envtest: SHELL = bash -hack/tools/envtest: - source '$(shell $(GO) list -f '{{ .Dir }}' -m 'sigs.k8s.io/controller-runtime')/hack/setup-envtest.sh' && fetch_envtest_tools $@ +##@ Release .PHONY: license licenses license: licenses -licenses: +licenses: ## Aggregate license files ./bin/license_aggregator.sh ./cmd/... 
+ +.PHONY: release-postgres-operator-image release-postgres-operator-image-labels +release-postgres-operator-image: ## Build the postgres-operator image and all its prerequisites +release-postgres-operator-image: release-postgres-operator-image-labels +release-postgres-operator-image: licenses +release-postgres-operator-image: build-postgres-operator-image +release-postgres-operator-image-labels: + $(if $(PGO_IMAGE_DESCRIPTION),, $(error missing PGO_IMAGE_DESCRIPTION)) + $(if $(PGO_IMAGE_MAINTAINER),, $(error missing PGO_IMAGE_MAINTAINER)) + $(if $(PGO_IMAGE_NAME),, $(error missing PGO_IMAGE_NAME)) + $(if $(PGO_IMAGE_SUMMARY),, $(error missing PGO_IMAGE_SUMMARY)) + $(if $(PGO_VERSION),, $(error missing PGO_VERSION)) diff --git a/README.md b/README.md index 728ef433db..357734566e 100644 --- a/README.md +++ b/README.md @@ -1,35 +1,49 @@

PGO: The Postgres Operator from Crunchy Data

- PGO: The Postgres Operator from Crunchy Data + PGO: The Postgres Operator from Crunchy Data

[![Go Report Card](https://goreportcard.com/badge/github.com/CrunchyData/postgres-operator)](https://goreportcard.com/report/github.com/CrunchyData/postgres-operator) +![GitHub Repo stars](https://img.shields.io/github/stars/CrunchyData/postgres-operator) +[![License](https://img.shields.io/github/license/CrunchyData/postgres-operator)](LICENSE.md) +[![Discord](https://img.shields.io/discord/1068276526740676708?label=discord&logo=discord)](https://discord.gg/a7vWKG8Ec9) # Production Postgres Made Easy [PGO](https://github.com/CrunchyData/postgres-operator), the [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com), gives you a **declarative Postgres** solution that automatically manages your [PostgreSQL](https://www.postgresql.org) clusters. -Designed for your GitOps workflows, it is [easy to get started](https://access.crunchydata.com/documentation/postgres-operator/v5/quickstart/) with Postgres on Kubernetes with PGO. Within a few moments, you can have a production grade Postgres cluster complete with high availability, disaster recovery, and monitoring, all over secure TLS communications. Even better, PGO lets you easily customize your Postgres cluster to tailor it to your workload! +Designed for your GitOps workflows, it is [easy to get started](https://access.crunchydata.com/documentation/postgres-operator/v5/quickstart/) with Postgres on Kubernetes with PGO. Within a few moments, you can have a production-grade Postgres cluster complete with high availability, disaster recovery, and monitoring, all over secure TLS communications. Even better, PGO lets you easily customize your Postgres cluster to tailor it to your workload! -With conveniences like cloning Postgres clusters to using rolling updates to roll out disruptive changes with minimal downtime, PGO is ready to support your Postgres data at every stage of your release pipeline. Built for resiliency and uptime, PGO will keep your desired Postgres in a desired state so you do not need to worry about it. +With conveniences like cloning Postgres clusters to using rolling updates to roll out disruptive changes with minimal downtime, PGO is ready to support your Postgres data at every stage of your release pipeline. Built for resiliency and uptime, PGO will keep your Postgres cluster in its desired state, so you do not need to worry about it. PGO is developed with many years of production experience in automating Postgres management on Kubernetes, providing a seamless cloud native Postgres solution to keep your data always available. +Have questions or looking for help? [Join our Discord group](https://discord.gg/a7vWKG8Ec9). + # Installation -We recommend following our [Quickstart](https://access.crunchydata.com/documentation/postgres-operator/v5/quickstart/) for how to install and get up and running with PGO, the Postgres Operator from Crunchy Data. However, if you just can't wait to try it out, here are some instructions to get Postgres up and running on Kubernetes: +Crunchy Data makes PGO available as the orchestration behind Crunchy Postgres for Kubernetes. Crunchy Postgres for Kubernetes is the integrated product that includes PostgreSQL, PGO and a collection of PostgreSQL tools and extensions that includes the various [open source components listed in the documentation](https://access.crunchydata.com/documentation/postgres-operator/latest/references/components). 
+ +We recommend following our [Quickstart](https://access.crunchydata.com/documentation/postgres-operator/v5/quickstart/) for how to install and get up and running. However, if you can't wait to try it out, here are some instructions to get Postgres up and running on Kubernetes: 1. [Fork the Postgres Operator examples repository](https://github.com/CrunchyData/postgres-operator-examples/fork) and clone it to your host machine. For example: -``` +```sh YOUR_GITHUB_UN="" git clone --depth 1 "git@github.com:${YOUR_GITHUB_UN}/postgres-operator-examples.git" cd postgres-operator-examples ``` -2. Run `kubectl apply -k kustomize/install` +2. Run the following commands: + +```sh +kubectl apply -k kustomize/install/namespace +kubectl apply --server-side -k kustomize/install/default +``` + +For more information please read the [Quickstart](https://access.crunchydata.com/documentation/postgres-operator/v5/quickstart/) and [Tutorial](https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/). -For more information please read the [Quickstart](https://access.crunchydata.com/documentation/postgres-operator/v5/quickstart/) and [Tutorial](https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/). +These installation instructions provide the steps necessary to install PGO along with Crunchy Data's Postgres distribution, Crunchy Postgres, as Crunchy Postgres for Kubernetes. In doing so, the installation downloads a series of container images from Crunchy Data's Developer Portal. For more information on the use of container images downloaded from the Crunchy Data Developer Portal or other third party sources, please see 'License and Terms' below. The installation and use of PGO outside of the use of Crunchy Postgres for Kubernetes will require modifications of these installation instructions and creation of the necessary PostgreSQL and related containers. # Cloud Native Postgres for Kubernetes @@ -42,7 +56,7 @@ Pods and PostgreSQL configuration! #### [High Availability][high-availability] -Safe, automated failover backed by a [distributed consensus based high-availability solution][high-availability]. +Safe, automated failover backed by a [distributed consensus high availability solution][high-availability]. Uses [Pod Anti-Affinity][k8s-anti-affinity] to help resiliency; you can configure how aggressive this can be! Failed primaries automatically heal, allowing for faster recovery time. @@ -52,7 +66,7 @@ Support for [standby PostgreSQL clusters][multiple-cluster] that work both withi [Backups][backups] and [restores][disaster-recovery] leverage the open source [pgBackRest][] utility and [includes support for full, incremental, and differential backups as well as efficient delta restores][backups]. -Set how long you want your backups retained for. Works great with very large databases! +Set how long you want to retain your backups. Works great with very large databases! #### Security and [TLS][tls] @@ -83,7 +97,7 @@ Advanced [connection pooling][pool] support using [pgBouncer][]. #### Pod Anti-Affinity, Node Affinity, Pod Tolerations -Have your PostgreSQL clusters deployed to [Kubernetes Nodes][k8s-nodes] of your preference. Set your [pod anti-affinity][k8s-anti-affinity], node affinity, Pod tolerations and more rules to customize your deployment topology! +Have your PostgreSQL clusters deployed to [Kubernetes Nodes][k8s-nodes] of your preference.
Set your [pod anti-affinity][k8s-anti-affinity], node affinity, Pod tolerations, and more rules to customize your deployment topology! #### [Scheduled Backups][backup-management] @@ -101,37 +115,36 @@ You can also [mix-and-match][backups-multi]: PGO lets you [store backups in mult PGO makes it easy to fully customize your Postgres cluster to tailor to your workload: - Choose the resources for your Postgres cluster: [container resources and storage size][resize-cluster]. [Resize at any time][resize-cluster] with minimal disruption. -- Use your own container image repository, including support `imagePullSecrets` and private repositories +- Use your own container image repository, including support for `imagePullSecrets` and private repositories - [Customize your PostgreSQL configuration][customize-cluster] #### [Namespaces][k8s-namespaces] Deploy PGO to watch Postgres clusters in all of your [namespaces][k8s-namespaces], or [restrict which namespaces][single-namespace] you want PGO to manage Postgres clusters in! -[backups]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/backups/ -[backups-s3]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/backups/#using-s3 -[backups-gcs]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/backups/#using-google-cloud-storage-gcs -[backups-azure]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/backups/#using-azure-blob-storage -[backups-multi]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/backups/#set-up-multiple-backup-repositories -[backup-management]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/backup-management/ -[clone]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/disaster-recovery/#clone-a-postgres-cluster -[customize-cluster]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/customize-cluster/ -[disaster-recovery]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/disaster-recovery/ -[high-availability]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/high-availability/ -[monitoring]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/monitoring/ -[multiple-cluster]: https://access.crunchydata.com/documentation/postgres-operator/v5/architecture/high-availability/multi-cluster-kubernetes/ -[pool]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/connection-pooling/ -[provisioning]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/create-cluster/ -[resize-cluster]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/resize-cluster/ +[backups]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/backups-disaster-recovery/backups +[backups-s3]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/backups-disaster-recovery/backups#using-s3 +[backups-gcs]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/backups-disaster-recovery/backups#using-google-cloud-storage-gcs +[backups-azure]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/backups-disaster-recovery/backups#using-azure-blob-storage +[backups-multi]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/backups-disaster-recovery/backups#set-up-multiple-backup-repositories +[backup-management]:
https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/backups-disaster-recovery/backup-management +[clone]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/backups-disaster-recovery/disaster-recovery#clone-a-postgres-cluster +[customize-cluster]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/day-two/customize-cluster +[disaster-recovery]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/backups-disaster-recovery/disaster-recovery +[high-availability]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/day-two/high-availability/ +[monitoring]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/day-two/monitoring/ +[multiple-cluster]: https://access.crunchydata.com/documentation/postgres-operator/v5/architecture/disaster-recovery/#standby-cluster-overview +[pool]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/basic-setup/connection-pooling/ +[provisioning]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/basic-setup/create-cluster/ +[resize-cluster]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/cluster-management/resize-cluster/ [single-namespace]: https://access.crunchydata.com/documentation/postgres-operator/v5/installation/kustomize/#installation-mode -[tls]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/customize-cluster/#customize-tls -[update-postgres]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/update-cluster/ +[tls]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/day-two/customize-cluster#customize-tls +[update-postgres]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/cluster-management/update-cluster [k8s-anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity [k8s-namespaces]: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ [k8s-nodes]: https://kubernetes.io/docs/concepts/architecture/nodes/ - [pgBackRest]: https://www.pgbackrest.org -[pgBouncer]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/connection-pooling/ +[pgBouncer]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/basic-setup/connection-pooling/ [pgMonitor]: https://github.com/CrunchyData/pgmonitor ## Included Components @@ -163,75 +176,62 @@ In addition to the above, the geospatially enhanced PostgreSQL + PostGIS contain - [PostGIS](http://postgis.net/) - [pgRouting](https://pgrouting.org/) -[PostgreSQL Operator Monitoring](https://crunchydata.github.io/postgres-operator/latest/architecture/monitoring/) uses the following components: +[PostgreSQL Operator Monitoring](https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/monitoring/) uses the following components: - [pgMonitor](https://github.com/CrunchyData/pgmonitor) - [Prometheus](https://github.com/prometheus/prometheus) - [Grafana](https://github.com/grafana/grafana) - [Alertmanager](https://github.com/prometheus/alertmanager) -Additional containers that are not directly integrated with the PostgreSQL Operator but can work alongside it include: - -- [pgPool II](https://access.crunchydata.com/documentation/crunchy-postgres-containers/latest/container-specifications/crunchy-pgpool/) -- 
[pg_upgrade](https://access.crunchydata.com/documentation/crunchy-postgres-containers/latest/container-specifications/crunchy-upgrade/) -- [pgBench](https://access.crunchydata.com/documentation/crunchy-postgres-containers/latest/container-specifications/crunchy-pgbench/) +For more information about which versions of the PostgreSQL Operator include which components, please visit the [compatibility](https://access.crunchydata.com/documentation/postgres-operator/v5/references/components/) section of the documentation. -For more information about which versions of the PostgreSQL Operator include which components, please visit the [compatibility](https://access.crunchydata.com/documentation/postgres-operator/latest/configuration/compatibility/) section of the documentation. - -## Supported Platforms +## [Supported Platforms](https://access.crunchydata.com/documentation/postgres-operator/latest/overview/supported-platforms) PGO, the Postgres Operator from Crunchy Data, is tested on the following platforms: -- Kubernetes 1.18+ -- OpenShift 4.5+ +- Kubernetes +- OpenShift +- Rancher - Google Kubernetes Engine (GKE), including Anthos - Amazon EKS - Microsoft AKS - VMware Tanzu -This list only includes the platforms that the Postgres Operator is specifically -tested on as part of the release process: PGO works on other Kubernetes -distributions as well, such as Rancher. - # Contributing to the Project Want to contribute to the PostgreSQL Operator project? Great! We've put together -as set of contributing guidelines that you can review here: +a set of contributing guidelines that you can review here: - [Contributing Guidelines](CONTRIBUTING.md) Once you are ready to submit a Pull Request, please ensure you do the following: -1. Reviewing the [contributing guidelines](CONTRIBUTING.md) and ensure your -that you have followed the commit message format, added testing where -appropriate, documented your changes, etc. +1. Review the [contributing guidelines](CONTRIBUTING.md) and ensure + that you have followed the commit message format, added testing where + appropriate, documented your changes, etc. 1. Open up a pull request based upon the guidelines. If you are adding a new -feature, please open up the pull request on the `master` branch. + feature, please open up the pull request on the `main` branch. 1. Please be as descriptive in your pull request as possible. If you are -referencing an issue, please be sure to include the issue in your pull request + referencing an issue, please be sure to include the issue in your pull request ## Support -If you believe you have found a bug or have detailed feature request, please open a GitHub issue and follow the guidelines for submitting a bug. +If you believe you have found a bug or have a detailed feature request, please open a GitHub issue and follow the guidelines for submitting a bug. -For general questions or community support, we welcome you to [join the PGO project community mailing list](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join) at [https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join) and ask your question there. +For general questions or community support, we welcome you to join our [community Discord](https://discord.gg/a7vWKG8Ec9) and ask your questions there.
For other information, please visit the [Support](https://access.crunchydata.com/documentation/postgres-operator/latest/support/) section of the documentation. # Documentation -For additional information regarding design, configuration and operation of the +For additional information regarding the design, configuration, and operation of the PostgreSQL Operator, please see the [Official Project Documentation][documentation]. -If you are looking for the [nightly builds of the documentation](https://crunchydata.github.io/postgres-operator/latest/), you can view them at: - -https://crunchydata.github.io/postgres-operator/latest/ - -[documentation]: https://access.crunchydata.com/documentation/postgres-operator/ +[documentation]: https://access.crunchydata.com/documentation/postgres-operator/latest/ ## Past Versions -Documentation for previous releases can be found at the [Crunchy Data Access Portal](https://access.crunchydata.com/documentation/) +Documentation for previous releases can be found at the [Crunchy Data Access Portal](https://access.crunchydata.com/documentation/). # Releases @@ -239,10 +239,15 @@ When a PostgreSQL Operator general availability (GA) release occurs, the contai - [Crunchy Data Customer Portal](https://access.crunchydata.com/) - [Crunchy Data Developer Portal](https://www.crunchydata.com/developers) -[DockerHub](https://hub.docker.com/u/crunchydata) The image rollout can occur over the course of several days. -To stay up-to-date on when releases are made available in the [Crunchy Data Developer Portal](https://www.crunchydata.com/developers), please sign up for the [Crunchy Data Developer Program Newsletter](https://www.crunchydata.com/developers/newsletter). You can also [join the PGO project community mailing list](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join) +To stay up-to-date on when releases are made available in the [Crunchy Data Developer Portal](https://www.crunchydata.com/developers), please sign up for the [Crunchy Data Developer Program Newsletter](https://www.crunchydata.com/developers#email). You can also [join the PGO project community discord](https://discord.gg/a7vWKG8Ec9). + +# FAQs, License and Terms + +For more information regarding PGO, the Postgres Operator project from Crunchy Data, and Crunchy Postgres for Kubernetes, please see the [frequently asked questions](https://access.crunchydata.com/documentation/postgres-operator/latest/faq). + +The installation instructions provided in this repo are designed for the use of PGO along with Crunchy Data's Postgres distribution, Crunchy Postgres, as Crunchy Postgres for Kubernetes. The unmodified use of these installation instructions will result in downloading container images from Crunchy Data repositories - specifically the Crunchy Data Developer Portal. The use of container images downloaded from the Crunchy Data Developer Portal is subject to the [Crunchy Data Developer Program terms](https://www.crunchydata.com/developers/terms-of-use). The PGO Postgres Operator project source code is available subject to the [Apache 2.0 license](LICENSE.md) with the PGO logo and branding assets covered by [our trademark guidelines](docs/static/logos/TRADEMARKS.md). diff --git a/bin/check-deps.sh b/bin/check-deps.sh deleted file mode 100755 index 9d6aee0f5b..0000000000 --- a/bin/check-deps.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -e - -# Copyright 2021 Crunchy Data Solutions, Inc.
-# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -echo "Ensuring project dependencies..." - -if ! command -v go &> /dev/null; then - echo 'Cannot find `go`. Perhaps:' - echo ' sudo yum install golang' - exit 1 -fi -if ! sort -VC <<< $'go1.13\n'"$( read -ra array <<< "$(go version)"; echo "${array[2]-}" )"; then - echo 'Old version of `go`: «' "$(go version)" '» Perhaps:' - echo ' sudo yum update golang' - exit 1 -fi - -if ! command -v buildah &> /dev/null; then - echo 'Cannot find `buildah`. Perhaps:' - echo ' sudo yum install buildah' - exit 1 -fi -if ! sort -VC <<< $'1.14.9\n'"$( read -ra array <<< "$(buildah --version)"; echo "${array[2]-}" )"; then - echo 'Old version of `buildah`: «' "$(buildah --version)" '» Perhaps:' - echo ' sudo yum update buildah' - exit 1 -fi diff --git a/bin/crunchy-postgres-exporter/.gitignore b/bin/crunchy-postgres-exporter/.gitignore deleted file mode 100644 index bd718d9cd9..0000000000 --- a/bin/crunchy-postgres-exporter/.gitignore +++ /dev/null @@ -1 +0,0 @@ -collectserver diff --git a/bin/crunchy-postgres-exporter/common_lib.sh b/bin/crunchy-postgres-exporter/common_lib.sh deleted file mode 100755 index a42a618eb1..0000000000 --- a/bin/crunchy-postgres-exporter/common_lib.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -# Copyright 2018 - 2021 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -RED="\033[0;31m" -GREEN="\033[0;32m" -YELLOW="\033[0;33m" -RESET="\033[0m" - -function enable_debugging() { - if [[ ${CRUNCHY_DEBUG:-false} == "true" ]] - then - echo_info "Turning debugging on.." - export PS4='+(${BASH_SOURCE}:${LINENO})> ${FUNCNAME[0]:+${FUNCNAME[0]}(): }' - set -x - fi -} - -function env_check_err() { - if [[ -z ${!1} ]] - then - echo_err "$1 environment variable is not set, aborting." - exit 1 - fi -} - -function echo_err() { - echo -e "${RED?}$(date) ERROR: ${1?}${RESET?}" -} - -function echo_info() { - echo -e "${GREEN?}$(date) INFO: ${1?}${RESET?}" -} - -function echo_warn() { - echo -e "${YELLOW?}$(date) WARN: ${1?}${RESET?}" -} diff --git a/bin/crunchy-postgres-exporter/start.sh b/bin/crunchy-postgres-exporter/start.sh deleted file mode 100755 index ffafe9d93f..0000000000 --- a/bin/crunchy-postgres-exporter/start.sh +++ /dev/null @@ -1,225 +0,0 @@ -#!/bin/bash - -# Copyright 2017 - 2021 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -source /opt/cpm/bin/common_lib.sh -enable_debugging - -export PG_EXP_HOME=$(find /opt/cpm/bin/ -type d -name 'postgres_exporter*') -export PG_DIR=$(find /usr/ -type d -name 'pgsql-*') -POSTGRES_EXPORTER_PIDFILE=/tmp/postgres_exporter.pid -CONFIG_DIR='/opt/cpm/conf' -QUERIES=( - queries_backrest - queries_global - queries_per_db - queries_nodemx -) - -function trap_sigterm() { - echo_info "Doing trap logic.." - - echo_warn "Clean shutdown of postgres-exporter.." - kill -SIGINT $(head -1 ${POSTGRES_EXPORTER_PIDFILE?}) -} - -# Set default env vars for the postgres exporter container -set_default_postgres_exporter_env() { - if [[ ! -v POSTGRES_EXPORTER_PORT ]] - then - export POSTGRES_EXPORTER_PORT="9187" - default_exporter_env_vars+=("POSTGRES_EXPORTER_PORT=${POSTGRES_EXPORTER_PORT}") - fi -} - -# Set default PG env vars for the exporter container -set_default_pg_exporter_env() { - - if [[ ! -v EXPORTER_PG_HOST ]] - then - export EXPORTER_PG_HOST="127.0.0.1" - default_exporter_env_vars+=("EXPORTER_PG_HOST=${EXPORTER_PG_HOST}") - fi - - if [[ ! -v EXPORTER_PG_PORT ]] - then - export EXPORTER_PG_PORT="5432" - default_exporter_env_vars+=("EXPORTER_PG_PORT=${EXPORTER_PG_PORT}") - fi - - if [[ ! -v EXPORTER_PG_DATABASE ]] - then - export EXPORTER_PG_DATABASE="postgres" - default_exporter_env_vars+=("EXPORTER_PG_DATABASE=${EXPORTER_PG_DATABASE}") - fi - - if [[ ! -v EXPORTER_PG_USER ]] - then - export EXPORTER_PG_USER="ccp_monitoring" - default_exporter_env_vars+=("EXPORTER_PG_USER=${EXPORTER_PG_USER}") - fi - - env_check_err "EXPORTER_PG_PASSWORD" -} - -trap 'trap_sigterm' SIGINT SIGTERM - -set_default_postgres_exporter_env -set_default_pg_exporter_env - -if [[ ! ${#default_exporter_env_vars[@]} -eq 0 ]] -then - echo_info "Defaults have been set for the following exporter env vars:" - echo_info "[${default_exporter_env_vars[*]}]" -fi - -# Check that postgres is accepting connections. -echo_info "Waiting for PostgreSQL to be ready.." -while true; do - ${PG_DIR?}/bin/pg_isready -q -h "${EXPORTER_PG_HOST}" -p "${EXPORTER_PG_PORT}" - if [ $? -eq 0 ]; then - break - fi - sleep 2 -done - -echo_info "Checking if "${EXPORTER_PG_USER}" is is created.." -while true; do - PGPASSWORD="${EXPORTER_PG_PASSWORD}" ${PG_DIR?}/bin/psql -q -h "${EXPORTER_PG_HOST}" -p "${EXPORTER_PG_PORT}" -U "${EXPORTER_PG_USER}" -c "SELECT 1;" "${EXPORTER_PG_DATABASE}" - if [ $? -eq 0 ]; then - break - fi - sleep 2 -done - -if [[ -f /conf/queries.yml ]] -then - echo_info "Custom queries configuration detected.." - QUERY_DIR='/conf' -else - echo_info "No custom queries detected. Applying default configuration.." - QUERY_DIR='/tmp' - - touch ${QUERY_DIR?}/queries.yml && > ${QUERY_DIR?}/queries.yml - for query in "${QUERIES[@]}" - do - if [[ -f ${CONFIG_DIR?}/${query?}.yml ]] - then - cat ${CONFIG_DIR?}/${query?}.yml >> /tmp/queries.yml - else - echo_err "Query file ${query?}.yml does not exist (it should).." 
- exit 1 - fi - done - - VERSION=$(PGPASSWORD="${EXPORTER_PG_PASSWORD}" ${PG_DIR?}/bin/psql -h "${EXPORTER_PG_HOST}" -p "${EXPORTER_PG_PORT}" -U "${EXPORTER_PG_USER}" -qtAX -c "SELECT current_setting('server_version_num')" "${EXPORTER_PG_DATABASE}") - if (( ${VERSION?} >= 90600 )) && (( ${VERSION?} < 100000 )) - then - if [[ -f ${CONFIG_DIR?}/pg96/queries_general.yml ]] - then - cat ${CONFIG_DIR?}/pg96/queries_general.yml >> /tmp/queries.yml - else - echo_err "Query file queries_general.yml does not exist (it should).." - fi - elif (( ${VERSION?} >= 100000 )) && (( ${VERSION?} < 110000 )) - then - if [[ -f ${CONFIG_DIR?}/pg10/queries_general.yml ]] - then - cat ${CONFIG_DIR?}/pg10/queries_general.yml >> /tmp/queries.yml - else - echo_err "Query file queries_general.yml does not exist (it should).." - fi - if [[ -f ${CONFIG_DIR?}/pg10/queries_pg_stat_statements.yml ]] - then - cat ${CONFIG_DIR?}/pg10/queries_pg_stat_statements.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements.yml not loaded." - fi - elif (( ${VERSION?} >= 110000 )) && (( ${VERSION?} < 120000 )) - then - if [[ -f ${CONFIG_DIR?}/pg11/queries_general.yml ]] - then - cat ${CONFIG_DIR?}/pg11/queries_general.yml >> /tmp/queries.yml - else - echo_err "Query file queries_general.yml does not exist (it should).." - fi - if [[ -f ${CONFIG_DIR?}/pg11/queries_pg_stat_statements.yml ]] - then - cat ${CONFIG_DIR?}/pg11/queries_pg_stat_statements.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements.yml not loaded." - fi - elif (( ${VERSION?} >= 120000 )) && (( ${VERSION?} < 130000 )) - then - if [[ -f ${CONFIG_DIR?}/pg12/queries_general.yml ]] - then - cat ${CONFIG_DIR?}/pg12/queries_general.yml >> /tmp/queries.yml - else - echo_err "Query file queries_general.yml does not exist (it should).." - fi - if [[ -f ${CONFIG_DIR?}/pg12/queries_pg_stat_statements.yml ]] - then - cat ${CONFIG_DIR?}/pg12/queries_pg_stat_statements.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements.yml not loaded." - fi - # queries_pg_stat_statements_reset is only available in PG12+. This may - # need to be updated based on a new path - if [[ -f ${CONFIG_DIR?}/pg12/queries_pg_stat_statements_reset_info.yml ]]; - then - cat ${CONFIG_DIR?}/pg12/queries_pg_stat_statements_reset_info.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements_reset_info.yml not loaded." - fi - elif (( ${VERSION?} >= 130000 )) - then - if [[ -f ${CONFIG_DIR?}/pg13/queries_general.yml ]] - then - cat ${CONFIG_DIR?}/pg13/queries_general.yml >> /tmp/queries.yml - else - echo_err "Query file queries_general.yml does not exist (it should).." - fi - if [[ -f ${CONFIG_DIR?}/pg13/queries_pg_stat_statements.yml ]] - then - cat ${CONFIG_DIR?}/pg13/queries_pg_stat_statements.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements.yml not loaded." - fi - # queries_pg_stat_statements_reset is only available in PG12+. This may - # need to be updated based on a new path - if [[ -f ${CONFIG_DIR?}/pg13/queries_pg_stat_statements_reset_info.yml ]]; - then - cat ${CONFIG_DIR?}/pg13/queries_pg_stat_statements_reset_info.yml >> /tmp/queries.yml - else - echo_warn "Query file queries_pg_stat_statements_reset_info.yml not loaded." - fi - else - echo_err "Unknown or unsupported version of PostgreSQL. Exiting.." 
- exit 1 - fi -fi - -sed -i \ - -e "s/#PGBACKREST_INFO_THROTTLE_MINUTES#/${PGBACKREST_INFO_THROTTLE_MINUTES:-10}/g" \ - -e "s/#PG_STAT_STATEMENTS_LIMIT#/${PG_STAT_STATEMENTS_LIMIT:-20}/g" \ - -e "s/#PG_STAT_STATEMENTS_THROTTLE_MINUTES#/${PG_STAT_STATEMENTS_THROTTLE_MINUTES:--1}/g" \ - /tmp/queries.yml - -PG_OPTIONS="--extend.query-path=${QUERY_DIR?}/queries.yml --web.listen-address=:${POSTGRES_EXPORTER_PORT}" - -echo_info "Starting postgres-exporter.." -DATA_SOURCE_URI="${EXPORTER_PG_HOST}:${EXPORTER_PG_PORT}/${EXPORTER_PG_DATABASE}?${EXPORTER_PG_PARAMS}" DATA_SOURCE_USER="${EXPORTER_PG_USER}" DATA_SOURCE_PASS="${EXPORTER_PG_PASSWORD}" ${PG_EXP_HOME?}/postgres_exporter ${PG_OPTIONS?} >>/dev/stdout 2>&1 & -echo $! > $POSTGRES_EXPORTER_PIDFILE - -wait diff --git a/bin/get-deps.sh b/bin/get-deps.sh deleted file mode 100755 index 2f049fed86..0000000000 --- a/bin/get-deps.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -e - -# Copyright 2017 - 2021 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -echo "Getting project dependencies..." -BINDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -POSTGRES_EXPORTER_VERSION=0.8.0 - -# Download Postgres Exporter, only required to build the Crunchy Postgres Exporter container -wget -O $PGOROOT/postgres_exporter.tar.gz https://github.com/wrouesnel/postgres_exporter/releases/download/v${POSTGRES_EXPORTER_VERSION?}/postgres_exporter_v${POSTGRES_EXPORTER_VERSION?}_linux-amd64.tar.gz - -# pgMonitor Setup -source $BINDIR/get-pgmonitor.sh diff --git a/bin/get-pgmonitor.sh b/bin/get-pgmonitor.sh deleted file mode 100755 index e46c9c4b9e..0000000000 --- a/bin/get-pgmonitor.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -e - -# Copyright 2017 - 2021 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -echo "Getting pgMonitor..." -PGMONITOR_COMMIT='v4.5-RC3' - -# pgMonitor Setup -if [[ -d ${PGOROOT?}/tools/pgmonitor ]] -then - rm -rf ${PGOROOT?}/tools/pgmonitor -fi - -git clone https://github.com/CrunchyData/pgmonitor.git ${PGOROOT?}/tools/pgmonitor -cd ${PGOROOT?}/tools/pgmonitor -git checkout ${PGMONITOR_COMMIT?} diff --git a/bin/license_aggregator.sh b/bin/license_aggregator.sh index b625aa60da..66f7284a97 100755 --- a/bin/license_aggregator.sh +++ b/bin/license_aggregator.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2021 Crunchy Data Solutions, Inc. +# Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/bin/pre-pull-crunchy-containers.sh b/bin/pre-pull-crunchy-containers.sh deleted file mode 100755 index 91cfcb9dc8..0000000000 --- a/bin/pre-pull-crunchy-containers.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright 2018 - 2021 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -for CNAME in crunchy-postgres crunchy-pgbadger crunchy-pgbouncer -do - docker pull crunchydata/$CNAME:$CCP_IMAGE_TAG -done diff --git a/bin/pull-ccp-from-gcr.sh b/bin/pull-ccp-from-gcr.sh deleted file mode 100755 index 0e6dc20aea..0000000000 --- a/bin/pull-ccp-from-gcr.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -set -e -u - -REGISTRY='us.gcr.io/container-suite' -VERSION=$CCP_IMAGE_TAG -IMAGES=( - crunchy-postgres-ha - crunchy-pgbadger - crunchy-pgbouncer - crunchy-pgdump - crunchy-pgrestore -) - -function echo_green() { - echo -e "\033[0;32m" - echo "$1" - echo -e "\033[0m" -} - -gcloud auth login -gcloud config set project container-suite -gcloud auth configure-docker - -for image in "${IMAGES[@]}" -do - echo_green "=> Pulling ${REGISTRY?}/${image?}:${VERSION?}.." - docker pull ${REGISTRY?}/${image?}:${VERSION?} - docker tag ${REGISTRY?}/${image?}:${VERSION?} crunchydata/${image?}:${VERSION?} -done - -echo_green "=> Done!" - -exit 0 diff --git a/bin/pull-from-gcr.sh b/bin/pull-from-gcr.sh deleted file mode 100755 index b8ff95c9f5..0000000000 --- a/bin/pull-from-gcr.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# Copyright 2018 - 2021 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e -u - -REGISTRY='us.gcr.io/container-suite' -VERSION=$PGO_IMAGE_TAG -IMAGES=( - postgres-operator - crunchy-postgres-exporter -) - -function echo_green() { - echo -e "\033[0;32m" - echo "$1" - echo -e "\033[0m" -} - -gcloud auth login -gcloud config set project container-suite -gcloud auth configure-docker - -for image in "${IMAGES[@]}" -do - echo_green "=> Pulling ${REGISTRY?}/${image?}:${VERSION?}.." - docker pull ${REGISTRY?}/${image?}:${VERSION?} - docker tag ${REGISTRY?}/${image?}:${VERSION?} crunchydata/${image?}:${VERSION?} -done - -echo_green "=> Done!" 
- -exit 0 diff --git a/bin/push-to-gcr.sh b/bin/push-to-gcr.sh deleted file mode 100755 index e0c23dee99..0000000000 --- a/bin/push-to-gcr.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# Copyright 2018 - 2021 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -GCR_IMAGE_PREFIX=gcr.io/crunchy-dev-test - -IMAGES=( -postgres-operator -crunchy-postgres-exporter -) - -for image in "${IMAGES[@]}" -do - docker tag $PGO_IMAGE_PREFIX/$image:$PGO_IMAGE_TAG \ - $GCR_IMAGE_PREFIX/$image:$PGO_IMAGE_TAG - gcloud docker -- push $GCR_IMAGE_PREFIX/$image:$PGO_IMAGE_TAG -done diff --git a/build/crunchy-postgres-exporter/Dockerfile b/build/crunchy-postgres-exporter/Dockerfile deleted file mode 100644 index 3a0781c1e2..0000000000 --- a/build/crunchy-postgres-exporter/Dockerfile +++ /dev/null @@ -1,52 +0,0 @@ -ARG BASEOS -ARG BASEVER -ARG PREFIX -FROM ${PREFIX}/pgo-base:${BASEOS}-${BASEVER} - -ARG BASEOS -ARG PGVERSION -ARG PACKAGER -ARG DFSET - -LABEL name="crunchy-postgres-exporter" \ - summary="Metrics exporter for PostgreSQL" \ - description="When run with the crunchy-postgres family of containers, crunchy-postgres-exporter reads the PostgreSQL data directory and has a SQL interface to a database to allow for metrics collection." \ - io.k8s.description="Crunchy PostgreSQL Exporter" \ - io.k8s.display-name="Crunchy PostgreSQL Exporter" \ - io.openshift.tags="postgresql,postgres,monitoring,database,crunchy" - -RUN if [ "$DFSET" = "centos" ] ; then \ - ${PACKAGER} -y install epel-release \ - && ${PACKAGER} install -y \ - --setopt=skip_missing_names_on_install=False \ - postgresql${PGVERSION} \ - && ${PACKAGER} -y clean all ; \ -fi - -RUN if [ "$BASEOS" = "ubi8" ] ; then \ - ${PACKAGER} install -y \ - findutils \ - postgresql${PGVERSION} \ - && ${PACKAGER} -y clean all ; \ -fi - -RUN mkdir -p /opt/cpm/bin /opt/cpm/conf - -ADD postgres_exporter.tar.gz /opt/cpm/bin -ADD tools/pgmonitor/postgres_exporter/common /opt/cpm/conf -ADD tools/pgmonitor/postgres_exporter/linux /opt/cpm/conf -ADD bin/crunchy-postgres-exporter /opt/cpm/bin - -RUN chgrp -R 0 /opt/cpm/bin /opt/cpm/conf && \ - chmod -R g=u /opt/cpm/bin/ opt/cpm/conf - -# postgres_exporter -EXPOSE 9187 - -# The VOLUME directive must appear after all RUN directives to ensure the proper -# volume permissions are applied when building the image -VOLUME ["/conf"] - -USER 2 - -CMD ["/opt/cpm/bin/start.sh"] diff --git a/build/pgo-base/Dockerfile b/build/pgo-base/Dockerfile deleted file mode 100644 index 2f28296520..0000000000 --- a/build/pgo-base/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -ARG BASE_IMAGE_OS -ARG DOCKERBASEREGISTRY -FROM ${DOCKERBASEREGISTRY}${BASE_IMAGE_OS} - -ARG BASEOS -ARG PGVERSION -ARG PG_FULL -ARG PACKAGER -ARG RELVER - -MAINTAINER info@crunchydata.com - -LABEL vendor="Crunchy Data" \ - url="https://crunchydata.com" \ - release="${RELVER}" \ - postgresql.version.major="${PGVERSION}" \ - postgresql.version="${PG_FULL}" \ - org.opencontainers.image.vendor="Crunchy Data" \ - 
io.openshift.tags="postgresql,postgres,sql,nosql,crunchy" \ - io.k8s.description="Trusted open source PostgreSQL-as-a-Service" - -COPY redhat/atomic/help.1 /help.1 -COPY redhat/atomic/help.md /help.md -COPY licenses /licenses - -RUN ${PACKAGER} -y update && ${PACKAGER} -y clean all - -RUN if [ "$BASEOS" = "centos8" ]; then \ - ${PACKAGER} -qy module disable postgresql ; \ -fi - -# Create module file to disable postgres module, microdnf cannot do this with the current version -RUN if [ "$BASEOS" = "ubi8" ] ; then \ - echo "[postgresql]" >> /etc/dnf/modules.d/postgresql.module \ - && echo "name=postgresql" >> /etc/dnf/modules.d/postgresql.module \ - && echo "stream=10" >> /etc/dnf/modules.d/postgresql.module \ - && echo "profiles=" >> /etc/dnf/modules.d/postgresql.module \ - && echo "state=disabled" >> /etc/dnf/modules.d/postgresql.module ; \ -fi - -# Crunchy PostgreSQL repository -ADD conf/RPM-GPG-KEY-crunchydata* / -ADD conf/crunchypg${PGVERSION}.repo /etc/yum.repos.d/ -RUN rpm --import RPM-GPG-KEY-crunchydata* diff --git a/build/postgres-operator/Dockerfile b/build/postgres-operator/Dockerfile index 7ea24fbb5d..69c5953761 100644 --- a/build/postgres-operator/Dockerfile +++ b/build/postgres-operator/Dockerfile @@ -1,13 +1,14 @@ -ARG BASEOS -ARG BASEVER -ARG PREFIX -FROM ${PREFIX}/pgo-base:${BASEOS}-${BASEVER} +FROM registry.access.redhat.com/ubi8/ubi-minimal -LABEL name="postgres-operator" \ - summary="Crunchy PostgreSQL Operator" \ - description="Crunchy PostgreSQL Operator" +COPY licenses /licenses -ADD bin/postgres-operator /usr/local/bin +COPY bin/postgres-operator /usr/local/bin + +RUN mkdir -p /opt/crunchy/conf + +COPY hack/tools/queries /opt/crunchy/conf + +RUN chgrp -R 0 /opt/crunchy/conf && chmod -R g=u opt/crunchy/conf USER 2 diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index 227f01c4c0..b2f8ae49b6 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -1,34 +1,38 @@ -package main - -/* -Copyright 2017 - 2021 Crunchy Data -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ +package main import ( "context" + "fmt" + "net/http" "os" + "strconv" "strings" + "time" + "unicode" "go.opentelemetry.io/otel" + "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/discovery" "k8s.io/client-go/rest" - cruntime "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/bridge/crunchybridgecluster" + "github.com/crunchydata/postgres-operator/internal/controller/pgupgrade" "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/controller/standalone_pgadmin" + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/registration" + "github.com/crunchydata/postgres-operator/internal/upgradecheck" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) var versionString string @@ -41,27 +45,96 @@ func assertNoError(err error) { } func initLogging() { - // Configure a singleton that treats logr.Logger.V(1) as logrus.DebugLevel. + // Configure a singleton that treats logging.Logger.V(1) as logrus.DebugLevel. var verbosity int if strings.EqualFold(os.Getenv("CRUNCHY_DEBUG"), "true") { verbosity = 1 } - logging.SetLogFunc(verbosity, logging.Logrus(os.Stdout, versionString, 1)) + logging.SetLogSink(logging.Logrus(os.Stdout, versionString, 1, verbosity)) + + global := logging.FromContext(context.Background()) + runtime.SetLogger(global) +} + +//+kubebuilder:rbac:groups="coordination.k8s.io",resources="leases",verbs={get,create,update,watch} + +func initManager() (runtime.Options, error) { + log := logging.FromContext(context.Background()) + + options := runtime.Options{} + options.Cache.SyncPeriod = initialize.Pointer(time.Hour) + + options.HealthProbeBindAddress = ":8081" + + // Enable leader elections when configured with a valid Lease.coordination.k8s.io name. 
+ // - https://docs.k8s.io/concepts/architecture/leases + // - https://releases.k8s.io/v1.30.0/pkg/apis/coordination/validation/validation.go#L26 + if lease := os.Getenv("PGO_CONTROLLER_LEASE_NAME"); len(lease) > 0 { + if errs := validation.IsDNS1123Subdomain(lease); len(errs) > 0 { + return options, fmt.Errorf("value for PGO_CONTROLLER_LEASE_NAME is invalid: %v", errs) + } + + options.LeaderElection = true + options.LeaderElectionID = lease + options.LeaderElectionNamespace = os.Getenv("PGO_NAMESPACE") + } + + // Check PGO_TARGET_NAMESPACE for backwards compatibility with + // "singlenamespace" installations + singlenamespace := strings.TrimSpace(os.Getenv("PGO_TARGET_NAMESPACE")) + + // Check PGO_TARGET_NAMESPACES for non-cluster-wide, multi-namespace + // installations + multinamespace := strings.TrimSpace(os.Getenv("PGO_TARGET_NAMESPACES")) + + // Initialize DefaultNamespaces if any target namespaces are set + if len(singlenamespace) > 0 || len(multinamespace) > 0 { + options.Cache.DefaultNamespaces = map[string]runtime.CacheConfig{} + } + + if len(singlenamespace) > 0 { + options.Cache.DefaultNamespaces[singlenamespace] = runtime.CacheConfig{} + } + + if len(multinamespace) > 0 { + for _, namespace := range strings.FieldsFunc(multinamespace, func(c rune) bool { + return c != '-' && !unicode.IsLetter(c) && !unicode.IsNumber(c) + }) { + options.Cache.DefaultNamespaces[namespace] = runtime.CacheConfig{} + } + } + + options.Controller.GroupKindConcurrency = map[string]int{ + "PostgresCluster." + v1beta1.GroupVersion.Group: 2, + } + + if s := os.Getenv("PGO_WORKERS"); s != "" { + if i, err := strconv.Atoi(s); err == nil && i > 0 { + options.Controller.GroupKindConcurrency["PostgresCluster."+v1beta1.GroupVersion.Group] = i + } else { + log.Error(err, "PGO_WORKERS must be a positive number") + } + } + + return options, nil } func main() { + // This context is canceled by SIGINT, SIGTERM, or by calling shutdown. + ctx, shutdown := context.WithCancel(runtime.SignalHandler()) + otelFlush, err := initOpenTelemetry() assertNoError(err) defer otelFlush() initLogging() - // create a context that will be used to stop all controllers on a SIGTERM or SIGINT - ctx := cruntime.SetupSignalHandler() log := logging.FromContext(ctx) log.V(1).Info("debug flag set to true") - cruntime.SetLogger(log) + features := feature.NewGate() + assertNoError(features.Set(os.Getenv("PGO_FEATURE_GATES"))) + log.Info("feature gates enabled", "PGO_FEATURE_GATES", features.String()) cfg, err := runtime.GetConfig() assertNoError(err) @@ -73,48 +146,155 @@ func main() { // deprecation warnings when using an older version of a resource for backwards compatibility). rest.SetDefaultWarningHandler(rest.NoWarnings{}) - mgr, err := runtime.CreateRuntimeManager(os.Getenv("PGO_TARGET_NAMESPACE"), cfg, false) + options, err := initManager() assertNoError(err) - // add all PostgreSQL Operator controllers to the runtime manager - err = addControllersToManager(ctx, mgr) + // Add to the Context that Manager passes to Reconciler.Start, Runnable.Start, + // and eventually Reconciler.Reconcile. 
+ options.BaseContext = func() context.Context { + ctx := context.Background() + ctx = feature.NewContext(ctx, features) + return ctx + } + + mgr, err := runtime.NewManager(cfg, options) + assertNoError(err) + + openshift := isOpenshift(cfg) + if openshift { + log.Info("detected OpenShift environment") + } + + registrar, err := registration.NewRunner(os.Getenv("RSA_KEY"), os.Getenv("TOKEN_PATH"), shutdown) assertNoError(err) + assertNoError(mgr.Add(registrar)) + token, _ := registrar.CheckToken() + + // add all PostgreSQL Operator controllers to the runtime manager + addControllersToManager(mgr, openshift, log, registrar) + + if features.Enabled(feature.BridgeIdentifiers) { + constructor := func() *bridge.Client { + client := bridge.NewClient(os.Getenv("PGO_BRIDGE_URL"), versionString) + client.Transport = otelTransportWrapper()(http.DefaultTransport) + return client + } + + assertNoError(bridge.ManagedInstallationReconciler(mgr, constructor)) + } + + // Enable upgrade checking + upgradeCheckingDisabled := strings.EqualFold(os.Getenv("CHECK_FOR_UPGRADES"), "false") + if !upgradeCheckingDisabled { + log.Info("upgrade checking enabled") + // get the URL for the check for upgrades endpoint if set in the env + assertNoError( + upgradecheck.ManagedScheduler( + mgr, + openshift, + os.Getenv("CHECK_FOR_UPGRADES_URL"), + versionString, + token, + )) + } else { + log.Info("upgrade checking disabled") + } + + // Enable health probes + assertNoError(mgr.AddHealthzCheck("health", healthz.Ping)) + assertNoError(mgr.AddReadyzCheck("check", healthz.Ping)) log.Info("starting controller runtime manager and will wait for signal to exit") + assertNoError(mgr.Start(ctx)) log.Info("signal received, exiting") } // addControllersToManager adds all PostgreSQL Operator controllers to the provided controller // runtime manager. 
-func addControllersToManager(ctx context.Context, mgr manager.Manager) error { - r := &postgrescluster.Reconciler{ +func addControllersToManager(mgr runtime.Manager, openshift bool, log logging.Logger, reg registration.Registration) { + pgReconciler := &postgrescluster.Reconciler{ + Client: mgr.GetClient(), + IsOpenShift: openshift, + Owner: postgrescluster.ControllerName, + Recorder: mgr.GetEventRecorderFor(postgrescluster.ControllerName), + Registration: reg, + Tracer: otel.Tracer(postgrescluster.ControllerName), + } + + if err := pgReconciler.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create PostgresCluster controller") + os.Exit(1) + } + + upgradeReconciler := &pgupgrade.PGUpgradeReconciler{ + Client: mgr.GetClient(), + Owner: "pgupgrade-controller", + Recorder: mgr.GetEventRecorderFor("pgupgrade-controller"), + Registration: reg, + } + + if err := upgradeReconciler.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create PGUpgrade controller") + os.Exit(1) + } + + pgAdminReconciler := &standalone_pgadmin.PGAdminReconciler{ Client: mgr.GetClient(), - Owner: postgrescluster.ControllerName, - Recorder: mgr.GetEventRecorderFor(postgrescluster.ControllerName), - Tracer: otel.Tracer(postgrescluster.ControllerName), - IsOpenShift: isOpenshift(ctx, mgr.GetConfig()), + Owner: "pgadmin-controller", + Recorder: mgr.GetEventRecorderFor(naming.ControllerPGAdmin), + IsOpenShift: openshift, } - return r.SetupWithManager(mgr) -} -func isOpenshift(ctx context.Context, cfg *rest.Config) bool { - log := logging.FromContext(ctx) + if err := pgAdminReconciler.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create PGAdmin controller") + os.Exit(1) + } - const openShiftAPIGroupSuffix = ".openshift.io" + constructor := func() bridge.ClientInterface { + client := bridge.NewClient(os.Getenv("PGO_BRIDGE_URL"), versionString) + client.Transport = otelTransportWrapper()(http.DefaultTransport) + return client + } - client, err := discovery.NewDiscoveryClientForConfig(cfg) - assertNoError(err) + crunchyBridgeClusterReconciler := &crunchybridgecluster.CrunchyBridgeClusterReconciler{ + Client: mgr.GetClient(), + Owner: "crunchybridgecluster-controller", + // TODO(crunchybridgecluster): recorder? 
+ // Recorder: mgr.GetEventRecorderFor(naming...), + NewClient: constructor, + } - groups, _, err := client.ServerGroupsAndResources() + if err := crunchyBridgeClusterReconciler.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create CrunchyBridgeCluster controller") + os.Exit(1) + } +} + +func isOpenshift(cfg *rest.Config) bool { + const sccGroupName, sccKind = "security.openshift.io", "SecurityContextConstraints" + + client, err := discovery.NewDiscoveryClientForConfig(cfg) assertNoError(err) - // If we detect that any API group name ends with "openshift.io", we'll - // return that this is an OpenShift environment - for _, g := range groups { - if strings.HasSuffix(g.Name, openShiftAPIGroupSuffix) { - log.Info("detected OpenShift environment") - return true + groups, err := client.ServerGroups() + if err != nil { + assertNoError(err) + } + for _, g := range groups.Groups { + if g.Name != sccGroupName { + continue + } + for _, v := range g.Versions { + resourceList, err := client.ServerResourcesForGroupVersion(v.GroupVersion) + if err != nil { + assertNoError(err) + } + for _, r := range resourceList.APIResources { + if r.Kind == sccKind { + return true + } + } } } diff --git a/cmd/postgres-operator/main_test.go b/cmd/postgres-operator/main_test.go new file mode 100644 index 0000000000..f369ce6bd3 --- /dev/null +++ b/cmd/postgres-operator/main_test.go @@ -0,0 +1,118 @@ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "reflect" + "testing" + "time" + + "gotest.tools/v3/assert" + "gotest.tools/v3/assert/cmp" +) + +func TestInitManager(t *testing.T) { + t.Run("Defaults", func(t *testing.T) { + options, err := initManager() + assert.NilError(t, err) + + if assert.Check(t, options.Cache.SyncPeriod != nil) { + assert.Equal(t, *options.Cache.SyncPeriod, time.Hour) + } + + assert.Assert(t, options.HealthProbeBindAddress == ":8081") + + assert.DeepEqual(t, options.Controller.GroupKindConcurrency, + map[string]int{ + "PostgresCluster.postgres-operator.crunchydata.com": 2, + }) + + assert.Assert(t, options.Cache.DefaultNamespaces == nil) + assert.Assert(t, options.LeaderElection == false) + + { + options.Cache.SyncPeriod = nil + options.Controller.GroupKindConcurrency = nil + options.HealthProbeBindAddress = "" + + assert.Assert(t, reflect.ValueOf(options).IsZero(), + "expected remaining fields to be unset:\n%+v", options) + } + }) + + t.Run("PGO_CONTROLLER_LEASE_NAME", func(t *testing.T) { + t.Setenv("PGO_NAMESPACE", "test-namespace") + + t.Run("Invalid", func(t *testing.T) { + t.Setenv("PGO_CONTROLLER_LEASE_NAME", "INVALID_NAME") + + options, err := initManager() + assert.ErrorContains(t, err, "PGO_CONTROLLER_LEASE_NAME") + assert.ErrorContains(t, err, "invalid") + + assert.Assert(t, options.LeaderElection == false) + assert.Equal(t, options.LeaderElectionNamespace, "") + }) + + t.Run("Valid", func(t *testing.T) { + t.Setenv("PGO_CONTROLLER_LEASE_NAME", "valid-name") + + options, err := initManager() + assert.NilError(t, err) + assert.Assert(t, options.LeaderElection == true) + assert.Equal(t, options.LeaderElectionNamespace, "test-namespace") + assert.Equal(t, options.LeaderElectionID, "valid-name") + }) + }) + + t.Run("PGO_TARGET_NAMESPACE", func(t *testing.T) { + t.Setenv("PGO_TARGET_NAMESPACE", "some-such") + + options, err := initManager() + assert.NilError(t, err) + assert.Assert(t, cmp.Len(options.Cache.DefaultNamespaces, 1), + "expected only one configured namespace") + + assert.Assert(t, 
cmp.Contains(options.Cache.DefaultNamespaces, "some-such")) + }) + + t.Run("PGO_TARGET_NAMESPACES", func(t *testing.T) { + t.Setenv("PGO_TARGET_NAMESPACES", "some-such,another-one") + + options, err := initManager() + assert.NilError(t, err) + assert.Assert(t, cmp.Len(options.Cache.DefaultNamespaces, 2), + "expect two configured namespaces") + + assert.Assert(t, cmp.Contains(options.Cache.DefaultNamespaces, "some-such")) + assert.Assert(t, cmp.Contains(options.Cache.DefaultNamespaces, "another-one")) + }) + + t.Run("PGO_WORKERS", func(t *testing.T) { + t.Run("Invalid", func(t *testing.T) { + for _, v := range []string{"-3", "0", "3.14"} { + t.Setenv("PGO_WORKERS", v) + + options, err := initManager() + assert.NilError(t, err) + assert.DeepEqual(t, options.Controller.GroupKindConcurrency, + map[string]int{ + "PostgresCluster.postgres-operator.crunchydata.com": 2, + }) + } + }) + + t.Run("Valid", func(t *testing.T) { + t.Setenv("PGO_WORKERS", "19") + + options, err := initManager() + assert.NilError(t, err) + assert.DeepEqual(t, options.Controller.GroupKindConcurrency, + map[string]int{ + "PostgresCluster.postgres-operator.crunchydata.com": 19, + }) + }) + }) +} diff --git a/cmd/postgres-operator/open_telemetry.go b/cmd/postgres-operator/open_telemetry.go index 70c411987b..2c9eedc135 100644 --- a/cmd/postgres-operator/open_telemetry.go +++ b/cmd/postgres-operator/open_telemetry.go @@ -1,21 +1,11 @@ -package main - -/* -Copyright 2021 Crunchy Data -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +package main import ( + "context" "fmt" "io" "net/http" @@ -23,43 +13,26 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/exporters/stdout" - "go.opentelemetry.io/otel/exporters/trace/jaeger" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/sdk/trace" ) func initOpenTelemetry() (func(), error) { - // At the time of this writing, the SDK (go.opentelemetry.io/otel@v0.13.0) - // does not automatically initialize any trace or metric exporter. An upcoming - // specification details environment variables that should facilitate this in - // the future. + // At the time of this writing, the SDK (go.opentelemetry.io/otel@v1.2.0) + // does not automatically initialize any exporter. We import the OTLP and + // stdout exporters and configure them below. Much of the OTLP exporter can + // be configured through environment variables. 
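The new `main_test.go` above covers the environment variables read by `initManager`. A minimal sketch of the corresponding container `env` fragment follows; the variable names and fallback behavior come from the tests, while the lease name and concrete values are illustrative examples, not defaults shipped by the project.

```yaml
# Hypothetical env fragment for the operator container (not part of this patch).
# Variable names match those exercised by main_test.go; values are examples only.
env:
- name: PGO_NAMESPACE                # also used as the leader-election namespace
  valueFrom:
    fieldRef:
      fieldPath: metadata.namespace
- name: PGO_CONTROLLER_LEASE_NAME    # leader election is enabled only when this is a valid lease name
  value: postgres-operator-lease     # example lease name
- name: PGO_TARGET_NAMESPACES        # comma-separated list; unset leaves the cache watching all namespaces
  value: some-such,another-one
- name: PGO_WORKERS                  # PostgresCluster concurrency; invalid values fall back to the default of 2
  value: "19"
```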
// - // - https://github.com/open-telemetry/opentelemetry-specification/blob/f5519f2b/specification/sdk-environment-variables.md - - switch os.Getenv("OTEL_EXPORTER") { - case "jaeger": - var endpoint jaeger.EndpointOption - agent := os.Getenv("JAEGER_AGENT_ENDPOINT") - collector := jaeger.CollectorEndpointFromEnv() - - if agent != "" { - endpoint = jaeger.WithAgentEndpoint(agent) - } - if collector != "" { - endpoint = jaeger.WithCollectorEndpoint(collector) - } - - provider, flush, err := jaeger.NewExportPipeline(endpoint) - if err != nil { - return nil, fmt.Errorf("unable to initialize Jaeger exporter: %w", err) - } - - otel.SetTracerProvider(provider) - return flush, nil + // - https://github.com/open-telemetry/opentelemetry-go/issues/2310 + // - https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/sdk-environment-variables.md + switch os.Getenv("OTEL_TRACES_EXPORTER") { case "json": var closer io.Closer filename := os.Getenv("OTEL_JSON_FILE") - options := []stdout.Option{stdout.WithoutMetricExport()} + options := []stdouttrace.Option{} if filename != "" { file, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) @@ -67,25 +40,42 @@ func initOpenTelemetry() (func(), error) { return nil, fmt.Errorf("unable to open exporter file: %w", err) } closer = file - options = append(options, stdout.WithWriter(file)) + options = append(options, stdouttrace.WithWriter(file)) } - provider, pusher, err := stdout.NewExportPipeline(options, nil) + exporter, err := stdouttrace.New(options...) if err != nil { return nil, fmt.Errorf("unable to initialize stdout exporter: %w", err) } + + provider := trace.NewTracerProvider(trace.WithBatcher(exporter)) flush := func() { - pusher.Stop() + _ = provider.Shutdown(context.TODO()) if closer != nil { _ = closer.Close() } } + otel.SetTracerProvider(provider) + return flush, nil + + case "otlp": + client := otlptracehttp.NewClient() + exporter, err := otlptrace.New(context.TODO(), client) + if err != nil { + return nil, fmt.Errorf("unable to initialize OTLP exporter: %w", err) + } + + provider := trace.NewTracerProvider(trace.WithBatcher(exporter)) + flush := func() { + _ = provider.Shutdown(context.TODO()) + } + otel.SetTracerProvider(provider) return flush, nil } - // $OTEL_EXPORTER is unset or unknown, so no TracerProvider has been assigned. + // $OTEL_TRACES_EXPORTER is unset or unknown, so no TracerProvider has been assigned. // The default at this time is a single "no-op" tracer. return func() {}, nil diff --git a/conf/.gitignore b/conf/.gitignore index 2212b52b1d..8925435045 100644 --- a/conf/.gitignore +++ b/conf/.gitignore @@ -1,4 +1,4 @@ *.repo *.public *.private -RPM-GPG-KEY-* +*KEY* diff --git a/config/README.md b/config/README.md index b2145a550e..73d2e59e6f 100644 --- a/config/README.md +++ b/config/README.md @@ -1,16 +1,7 @@ @@ -19,9 +10,6 @@ - The `default` target installs the operator in the `postgres-operator` namespace and configures it to manage resources in all namespaces. -- The `singlenamespace` target installs the operator in the `postgres-operator` - namespace and configures it to manage resources in that same namespace. 
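The rewritten `open_telemetry.go` above replaces the old `OTEL_EXPORTER`/Jaeger pipeline with an `OTEL_TRACES_EXPORTER` switch handling `json` and `otlp` cases. A hedged sketch of the environment that would enable tracing, assuming an OTLP/HTTP collector: the endpoint and file path are placeholders, and `OTEL_EXPORTER_OTLP_ENDPOINT` is the standard OpenTelemetry variable honored by `otlptracehttp.NewClient`.

```yaml
# Hypothetical env fragment enabling the OTLP trace exporter; values are placeholders.
env:
- name: OTEL_TRACES_EXPORTER            # "otlp" or "json"; unset keeps the no-op tracer
  value: otlp
- name: OTEL_EXPORTER_OTLP_ENDPOINT     # standard OTLP variable read by the otlptracehttp client
  value: "http://otel-collector.monitoring.svc:4318"
# File-based alternative handled by the "json" case above:
# - name: OTEL_TRACES_EXPORTER
#   value: json
# - name: OTEL_JSON_FILE                # file the stdouttrace exporter appends to
#   value: /tmp/pgo-traces.json
```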
- diff --git a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml new file mode 100644 index 0000000000..82db84b466 --- /dev/null +++ b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml @@ -0,0 +1,290 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: crunchybridgeclusters.postgres-operator.crunchydata.com +spec: + group: postgres-operator.crunchydata.com + names: + kind: CrunchyBridgeCluster + listKind: CrunchyBridgeClusterList + plural: crunchybridgeclusters + singular: crunchybridgecluster + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: CrunchyBridgeCluster is the Schema for the crunchybridgeclusters + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + CrunchyBridgeClusterSpec defines the desired state of CrunchyBridgeCluster + to be managed by Crunchy Data Bridge + properties: + clusterName: + description: The name of the cluster + maxLength: 50 + minLength: 5 + pattern: ^[A-Za-z][A-Za-z0-9\-_ ]*[A-Za-z0-9]$ + type: string + isHa: + description: |- + Whether the cluster is high availability, + meaning that it has a secondary it can fail over to quickly + in case the primary becomes unavailable. + type: boolean + isProtected: + description: |- + Whether the cluster is protected. Protected clusters can't be destroyed until + their protected flag is removed + type: boolean + majorVersion: + description: |- + The ID of the cluster's major Postgres version. + Currently Bridge offers 13-17 + maximum: 17 + minimum: 13 + type: integer + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + plan: + description: The ID of the cluster's plan. Determines instance, CPU, + and memory. + type: string + provider: + description: |- + The cloud provider where the cluster is located. + Currently Bridge offers aws, azure, and gcp only + enum: + - aws + - azure + - gcp + type: string + x-kubernetes-validations: + - message: immutable + rule: self == oldSelf + region: + description: The provider region where the cluster is located. + type: string + x-kubernetes-validations: + - message: immutable + rule: self == oldSelf + roles: + description: |- + Roles for which to create Secrets that contain their credentials which + are retrieved from the Bridge API. An empty list creates no role secrets. + Removing a role from this list does NOT drop the role nor revoke their + access, but it will delete that role's secret from the kube cluster. 
+ items: + properties: + name: + description: |- + Name of the role within Crunchy Bridge. + More info: https://docs.crunchybridge.com/concepts/users + type: string + secretName: + description: The name of the Secret that will hold the role + credentials. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + - secretName + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + secret: + description: The name of the secret containing the API key and team + id + type: string + storage: + anyOf: + - type: integer + - type: string + description: |- + The amount of storage available to the cluster in gigabytes. + The amount must be an integer, followed by Gi (gibibytes) or G (gigabytes) to match Kubernetes conventions. + If the amount is given in Gi, we round to the nearest G value. + The minimum value allowed by Bridge is 10 GB. + The maximum value allowed by Bridge is 65535 GB. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - clusterName + - isHa + - majorVersion + - plan + - provider + - region + - secret + - storage + type: object + status: + description: CrunchyBridgeClusterStatus defines the observed state of + CrunchyBridgeCluster + properties: + conditions: + description: conditions represent the observations of postgres cluster's + current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
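The CrunchyBridgeCluster CRD above defines the user-facing spec. A hypothetical manifest exercising those fields is sketched below; the plan ID, region, role name, and Secret names are placeholders rather than verified Bridge values.

```yaml
# Hypothetical CrunchyBridgeCluster; placeholder names and values throughout.
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: CrunchyBridgeCluster
metadata:
  name: hippo-bridge
spec:
  clusterName: hippo-bridge             # 5-50 characters, must start with a letter
  isHa: false
  isProtected: false
  majorVersion: 16                      # Bridge currently offers 13-17
  plan: standard-8                      # placeholder plan ID
  provider: aws                         # immutable; aws, azure, or gcp
  region: us-east-1                     # immutable; placeholder region
  secret: crunchy-bridge-api-key        # Secret containing the API key and team id
  storage: 10Gi                         # Bridge minimum is 10 GB
  roles:
  - name: application                   # placeholder Bridge role name
    secretName: hippo-bridge-application  # credentials Secret created in the kube cluster
```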
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + host: + description: The Hostname of the postgres cluster in Bridge, provided + by Bridge API and null until then. + type: string + id: + description: The ID of the postgres cluster in Bridge, provided by + Bridge API and null until then. + type: string + isHa: + description: |- + Whether the cluster is high availability, meaning that it has a secondary it can fail + over to quickly in case the primary becomes unavailable. + type: boolean + isProtected: + description: |- + Whether the cluster is protected. Protected clusters can't be destroyed until + their protected flag is removed + type: boolean + majorVersion: + description: The cluster's major Postgres version. + type: integer + name: + description: The name of the cluster in Bridge. + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + on which the status was based. + format: int64 + minimum: 0 + type: integer + ongoingUpgrade: + description: The cluster upgrade as represented by Bridge + items: + properties: + flavor: + type: string + starting_from: + type: string + state: + type: string + required: + - flavor + - starting_from + - state + type: object + type: array + plan: + description: The ID of the cluster's plan. Determines instance, CPU, + and memory. + type: string + responses: + description: Most recent, raw responses from Bridge API + type: object + x-kubernetes-preserve-unknown-fields: true + state: + description: State of cluster in Bridge. + type: string + storage: + anyOf: + - type: integer + - type: string + description: The amount of storage available to the cluster. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml new file mode 100644 index 0000000000..da729cfaf2 --- /dev/null +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -0,0 +1,1924 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: pgadmins.postgres-operator.crunchydata.com +spec: + group: postgres-operator.crunchydata.com + names: + kind: PGAdmin + listKind: PGAdminList + plural: pgadmins + singular: pgadmin + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: PGAdmin is the Schema for the PGAdmin API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PGAdminSpec defines the desired state of PGAdmin + properties: + affinity: + description: |- + Scheduling constraints of the PGAdmin pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. 
The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + config: + description: |- + Configuration settings for the pgAdmin process. Changes to any of these + values will be loaded without validation. Be careful, as + you may put pgAdmin into an unusable state. + properties: + configDatabaseURI: + description: |- + A Secret containing the value for the CONFIG_DATABASE_URI setting. + More info: https://www.pgadmin.org/docs/pgadmin4/latest/external_database.html + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + files: + description: |- + Files allows the user to mount projected volumes into the pgAdmin + container so that files can be referenced by pgAdmin as needed. + items: + description: Projection that may be projected along with other + supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. 
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + gunicorn: + description: |- + Settings for the gunicorn server. + More info: https://docs.gunicorn.org/en/latest/settings.html + type: object + x-kubernetes-preserve-unknown-fields: true + ldapBindPassword: + description: |- + A Secret containing the value for the LDAP_BIND_PASSWORD setting. + More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + settings: + description: |- + Settings for the pgAdmin server process. Keys should be uppercase and + values must be constants. 
+ More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + dataVolumeClaimSpec: + description: |- + Defines a PersistentVolumeClaim for pgAdmin data. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. 
+ If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + image: + description: The image name to use for pgAdmin instance. + type: string + imagePullPolicy: + description: |- + ImagePullPolicy is used to determine when Kubernetes will attempt to + pull (download) container images. + More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + enum: + - Always + - Never + - IfNotPresent + type: string + imagePullSecrets: + description: |- + The image pull secrets used to pull from a private registry. + Changing this value causes all running PGAdmin pods to restart. + https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + priorityClassName: + description: |- + Priority class name for the PGAdmin pod. Changing this + value causes PGAdmin pod to restart. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + type: string + resources: + description: Resource requirements for the PGAdmin container. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + serverGroups: + description: |- + ServerGroups for importing PostgresClusters to pgAdmin. + To create a pgAdmin with no selectors, leave this field empty. + A pgAdmin created with no `ServerGroups` will not automatically + add any servers through discovery. PostgresClusters can still be + added manually. + items: + properties: + name: + description: |- + The name for the ServerGroup in pgAdmin. + Must be unique in the pgAdmin's ServerGroups since it becomes the ServerGroup name in pgAdmin. + type: string + postgresClusterName: + description: PostgresClusterName selects one cluster to add + to pgAdmin by name. + type: string + postgresClusterSelector: + description: |- + PostgresClusterSelector selects clusters to dynamically add to pgAdmin by matching labels. + An empty selector like `{}` will select ALL clusters in the namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + x-kubernetes-validations: + - message: exactly one of "postgresClusterName" or "postgresClusterSelector" + is required + rule: '[has(self.postgresClusterName),has(self.postgresClusterSelector)].exists_one(x,x)' + type: array + serviceName: + description: |- + ServiceName will be used as the name of a ClusterIP service pointing + to the pgAdmin pod and port. If the service already exists, PGO will + update the service. For more information about services reference + the Kubernetes and CrunchyData documentation. + https://kubernetes.io/docs/concepts/services-networking/service/ + type: string + tolerations: + description: |- + Tolerations of the PGAdmin pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + users: + description: |- + pgAdmin users that are managed via the PGAdmin spec. Users can still + be added via the pgAdmin GUI, but those users will not show up here. + items: + properties: + passwordRef: + description: A reference to the secret that holds the user's + password. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + role: + description: |- + Role determines whether the user has admin privileges or not. + Defaults to User. Valid options are Administrator and User. + enum: + - Administrator + - User + type: string + username: + description: |- + The username for User in pgAdmin. + Must be unique in the pgAdmin's users list. + type: string + required: + - passwordRef + - username + type: object + type: array + x-kubernetes-list-map-keys: + - username + x-kubernetes-list-type: map + required: + - dataVolumeClaimSpec + type: object + status: + description: PGAdminStatus defines the observed state of PGAdmin + properties: + conditions: + description: |- + conditions represent the observations of pgAdmin's current state. + Known .status.conditions.type is: "PersistentVolumeResizing" + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + imageSHA: + description: ImageSHA represents the image SHA for the container running + pgAdmin. + type: string + majorVersion: + description: MajorVersion represents the major version of the running + pgAdmin. + type: integer + observedGeneration: + description: observedGeneration represents the .metadata.generation + on which the status was based. 
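# Illustrative sketch, not part of the generated CRD content above: a minimal PGAdmin
# manifest exercising spec fields documented in this schema, namely config.settings,
# config.files (a projected ConfigMap), dataVolumeClaimSpec, serverGroups, and users.
# Every name below (rhino, pgadmin-extra-config, pgadmin-password-secret, the
# owner=logistics label, the example username) is a hypothetical placeholder and is
# not defined by this file.
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PGAdmin
metadata:
  name: rhino
spec:
  config:
    settings:
      SHOW_GRAVATAR_IMAGE: False       # keys uppercase, values constants (see settings description)
    files:
    - configMap:
        name: pgadmin-extra-config     # placeholder ConfigMap to project into the pgAdmin container
        items:
        - key: ldap-bind-dn
          path: ldap_bind_dn.txt
  dataVolumeClaimSpec:                 # required by this schema
    accessModes:
    - ReadWriteOnce
    resources:
      requests:
        storage: 1Gi
  serverGroups:
  - name: supply                       # exactly one of postgresClusterName or postgresClusterSelector
    postgresClusterSelector:
      matchLabels:
        owner: logistics
  users:
  - username: admin@example.com
    role: Administrator
    passwordRef:
      name: pgadmin-password-secret    # placeholder Secret holding the user's password
      key: admin-password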
+ format: int64 + minimum: 0 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml new file mode 100644 index 0000000000..4ae831cfc7 --- /dev/null +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml @@ -0,0 +1,1210 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: pgupgrades.postgres-operator.crunchydata.com +spec: + group: postgres-operator.crunchydata.com + names: + kind: PGUpgrade + listKind: PGUpgradeList + plural: pgupgrades + singular: pgupgrade + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: PGUpgrade is the Schema for the pgupgrades API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PGUpgradeSpec defines the desired state of PGUpgrade + properties: + affinity: + description: |- + Scheduling constraints of the PGUpgrade pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + fromPostgresVersion: + description: The major version of PostgreSQL before the upgrade. + maximum: 17 + minimum: 11 + type: integer + image: + description: The image name to use for major PostgreSQL upgrades. + type: string + imagePullPolicy: + description: |- + ImagePullPolicy is used to determine when Kubernetes will attempt to + pull (download) container images. + More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + enum: + - Always + - Never + - IfNotPresent + type: string + imagePullSecrets: + description: |- + The image pull secrets used to pull from a private registry. + Changing this value causes all running PGUpgrade pods to restart. + https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + postgresClusterName: + description: The name of the cluster to be updated + minLength: 1 + type: string + priorityClassName: + description: |- + Priority class name for the PGUpgrade pod. Changing this + value causes PGUpgrade pod to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + type: string + resources: + description: Resource requirements for the PGUpgrade container. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + toPostgresImage: + description: |- + The image name to use for PostgreSQL containers after upgrade. + When omitted, the value comes from an operator environment variable. + type: string + toPostgresVersion: + description: The major version of PostgreSQL to be upgraded to. + maximum: 17 + minimum: 11 + type: integer + tolerations: + description: |- + Tolerations of the PGUpgrade pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. 
+ format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + required: + - fromPostgresVersion + - postgresClusterName + - toPostgresVersion + type: object + status: + description: PGUpgradeStatus defines the observed state of PGUpgrade + properties: + conditions: + description: conditions represent the observations of PGUpgrade's + current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: observedGeneration represents the .metadata.generation + on which the status was based. 
+ format: int64 + minimum: 0 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index e98fb370db..6f9dd40f02 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.4 name: postgresclusters.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -22,14 +20,19 @@ spec: description: PostgresCluster is the Schema for the postgresclusters API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -43,54 +46,143 @@ spec: description: pgBackRest archive configuration properties: configuration: - description: 'Projected volumes containing custom pgBackRest - configuration. These files are mounted under "/etc/pgbackrest/conf.d" - alongside any pgBackRest configuration generated by the - PostgreSQL Operator: https://pgbackrest.org/configuration.html' + description: |- + Projected volumes containing custom pgBackRest configuration. These files are mounted + under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the + PostgreSQL Operator: + https://pgbackrest.org/configuration.html items: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. 
Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object configMap: - description: information about the configMap data to - project + description: configMap information about the configMap + data to project properties: items: - description: If unspecified, each key-value pair - in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the ConfigMap, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. 
+ description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. properties: key: - description: The key to project. + description: key is the key to project. type: string mode: - description: 'Optional: mode bits used to - set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -98,19 +190,25 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: - description: Specify whether the ConfigMap or its - keys must be defined + description: optional specify whether the ConfigMap + or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: - description: information about the downwardAPI data - to project + description: downwardAPI information about the downwardAPI + data to project properties: items: description: Items is a list of DownwardAPIVolume @@ -122,8 +220,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' + the pod: only annotations, labels, name, + namespace and uid are supported.' 
properties: apiVersion: description: Version of the schema the @@ -137,17 +235,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used to - set permissions on this file, must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -158,10 +254,9 @@ spec: the relative path must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: description: 'Container name: required @@ -182,50 +277,48 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: - description: information about the secret data to project + description: secret information about the secret data + to project properties: items: - description: If unspecified, each key-value pair - in the Data field of the referenced Secret will - be projected into the volume as a file whose name - is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the Secret, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. properties: key: - description: The key to project. + description: key is the key to project. type: string mode: - description: 'Optional: mode bits used to - set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. 
This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -233,43 +326,47 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: - description: Specify whether the Secret or its key - must be defined + description: optional field specify whether the + Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: - description: information about the serviceAccountToken - data to project + description: serviceAccountToken is information about + the serviceAccountToken data to project properties: audience: - description: Audience is the intended audience of - the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: ExpirationSeconds is the requested - duration of validity of the service account token. - As the token approaches expiration, the kubelet - volume plugin will proactively rotate the service - account token. The kubelet will start trying to - rotate the token if the token is older than 80 - percent of its time to live or if the token is - older than 24 hours.Defaults to 1 hour and must - be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. 
format: int64 type: integer path: - description: Path is the path relative to the mount - point of the file to project the token into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -279,83 +376,46 @@ spec: global: additionalProperties: type: string - description: 'Global pgBackRest configuration settings. These - settings are included in the "global" section of the pgBackRest - configuration generated by the PostgreSQL Operator, and - then mounted under "/etc/pgbackrest/conf.d": https://pgbackrest.org/configuration.html' + description: |- + Global pgBackRest configuration settings. These settings are included in the "global" + section of the pgBackRest configuration generated by the PostgreSQL Operator, and then + mounted under "/etc/pgbackrest/conf.d": + https://pgbackrest.org/configuration.html type: object image: - description: The image name to use for pgBackRest containers. Utilized - to run pgBackRest repository hosts and backups. The image - may also be set using the RELATED_IMAGE_PGBACKREST environment - variable + description: |- + The image name to use for pgBackRest containers. Utilized to run + pgBackRest repository hosts and backups. The image may also be set using + the RELATED_IMAGE_PGBACKREST environment variable type: string - manual: - description: Defines details for manual pgBackRest backup - Jobs - properties: - options: - description: Command line options to include when running - the pgBackRest backup command. https://pgbackrest.org/command.html#command-backup - items: - type: string - type: array - repoName: - description: The name of the pgBackRest repo to run the - backup command against. - pattern: ^repo[1-4] - type: string - required: - - repoName - type: object - metadata: - description: Metadata contains metadata for PostgresCluster - resources - properties: - annotations: - additionalProperties: - type: string - type: object - labels: - additionalProperties: - type: string - type: object - type: object - repoHost: - description: Defines configuration for a pgBackRest dedicated - repository host. This section is only applicable if at - least one "volume" (i.e. PVC-based) repository is defined - in the "repos" section, therefore enabling a dedicated repository - host Deployment. + jobs: + description: Jobs field allows configuration for all backup + jobs properties: affinity: - description: 'Scheduling constraints of the Dedicated - repo host pod. Changing this value causes repo host - to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of pgBackRest backup Job pods. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. 
for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) - with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling - term matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -365,85 +425,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. 
properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in @@ -455,112 +502,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to an update), the system - may or may not try to eventually evict the pod - from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector - term matches no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules @@ -568,20 +603,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. 
for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most - preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -592,19 +623,101 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -612,72 +725,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies which - namespaces the labelSelector applies - to (matches against); null or empty - list means "this pod's namespace" + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -685,41 +787,121 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to a pod label update), - the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -727,64 +909,59 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies which - namespaces the labelSelector applies to - (matches against); null or empty list - means "this pod's namespace" + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling @@ -792,20 +969,16 @@ spec: zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity - expressions specified by this field, but it - may choose a node that violates one or more - of the expressions. The node that is most preferred - is the one with the greatest sum of weights, - i.e. for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - anti-affinity expressions, etc.), compute a - sum by iterating through the elements of this - field and adding "weight" to the sum if the - node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest - sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -816,19 +989,101 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -836,72 +1091,61 @@ spec: to. 
type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies which - namespaces the labelSelector applies - to (matches against); null or empty - list means "this pod's namespace" + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
format: int32 type: integer required: @@ -909,41 +1153,121 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements - specified by this field are not met at scheduling - time, the pod will not be scheduled onto the - node. If the anti-affinity requirements specified - by this field cease to be met at some point - during pod execution (e.g. due to a pod label - update), the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -951,70 +1275,97 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies which - namespaces the labelSelector applies to - (matches against); null or empty list - means "this pod's namespace" + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object + priorityClassName: + description: |- + Priority class name for the pgBackRest backup Job pods. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + type: string resources: - description: Resource requirements for a pgBackRest repository - host + description: |- + Resource limits for backup jobs. Includes manual, scheduled and replica + create backups properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -1022,8 +1373,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -1032,431 +1384,124 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object - sshConfigMap: - description: ConfigMap containing custom SSH configuration - properties: - items: - description: If unspecified, each key-value pair in - the Data field of the referenced ConfigMap will - be projected into the volume as a file whose name - is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If - a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked - optional. Paths must be relative and may not contain - the '..' path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits used to set - permissions on this file. Must be an octal - value between 0000 and 0777 or a decimal value - between 0 and 511. YAML accepts both octal - and decimal values, JSON requires decimal - values for mode bits. If not specified, the - volume defaultMode will be used. This might - be in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to - map the key to. May not be an absolute path. - May not contain the path element '..'. May - not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
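The resources block above uses the standard limits/requests shape, and priorityClassName is passed straight through to the backup Job pods. A brief sketch that slots into the same PostgresCluster spec as the previous example; the quantities and the PriorityClass name are assumptions.

spec:
  backups:
    pgbackrest:
      jobs:
        priorityClassName: example-batch-priority  # hypothetical PriorityClass
        resources:
          requests:
            cpu: 500m
            memory: 256Mi
          limits:
            cpu: "1"
            memory: 1Gi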
- type: string - optional: - description: Specify whether the ConfigMap or its - keys must be defined - type: boolean - type: object - sshSecret: - description: Secret containing custom SSH keys - properties: - items: - description: If unspecified, each key-value pair in - the Data field of the referenced Secret will be - projected into the volume as a file whose name is - the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If - a key is specified which is not present in the Secret, - the volume setup will error unless it is marked - optional. Paths must be relative and may not contain - the '..' path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits used to set - permissions on this file. Must be an octal - value between 0000 and 0777 or a decimal value - between 0 and 511. YAML accepts both octal - and decimal values, JSON requires decimal - values for mode bits. If not specified, the - volume defaultMode will be used. This might - be in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to - map the key to. May not be an absolute path. - May not contain the path element '..'. May - not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - type: object tolerations: - description: 'Tolerations of a PgBackRest repo host pod. - Changing this value causes a restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of pgBackRest backup Job pods. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to - tolerates any taint that matches the triple - using the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to - match. Empty means match all taint effects. When - specified, allowed values are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; - this combination means to match all values and - all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. 
Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints - of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect - NoExecute, otherwise this field is ignored) tolerates - the taint. By default, it is not set, which means - tolerate the taint forever (do not evict). Zero - and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value - should be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array + ttlSecondsAfterFinished: + description: |- + Limit the lifetime of a Job that has finished. + More info: https://kubernetes.io/docs/concepts/workloads/controllers/job + format: int32 + minimum: 60 + type: integer type: object - repos: - description: Defines a pgBackRest repository - items: - description: PGBackRestRepo represents a pgBackRest repository. Only - one of its members may be specified. - properties: - azure: - description: Represents a pgBackRest repository that - is created using Azure storage - properties: - container: - description: The Azure container utilized for the - repository - type: string - required: - - container - type: object - gcs: - description: Represents a pgBackRest repository that - is created using Google Cloud Storage - properties: - bucket: - description: The GCS bucket utilized for the repository - type: string - required: - - bucket - type: object - name: - description: The name of the the repository - pattern: ^repo[1-4] + manual: + description: Defines details for manual pgBackRest backup + Jobs + properties: + options: + description: |- + Command line options to include when running the pgBackRest backup command. + https://pgbackrest.org/command.html#command-backup + items: type: string - s3: - description: RepoS3 represents a pgBackRest repository - that is created using AWS S3 (or S3-compatible) storage - properties: - bucket: - description: The S3 bucket utilized for the repository - type: string - endpoint: - description: A valid endpoint corresponding to the - specified region - type: string - region: - description: The region corresponding to the S3 - bucket - type: string - required: - - bucket - - endpoint - - region - type: object - schedules: - description: 'Defines the schedules for the pgBackRest - backups Full, Differential and Incremental backup - types are supported: https://pgbackrest.org/user-guide.html#concept/backup' - properties: - differential: - description: 'Defines the Cron schedule for a differential - pgBackRest backup. 
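To illustrate the tolerations and ttlSecondsAfterFinished fields introduced above, a sketch under the same assumed spec.backups.pgbackrest.jobs path, with a hypothetical taint; note the schema enforces a minimum of 60 seconds on the TTL.

spec:
  backups:
    pgbackrest:
      jobs:
        tolerations:
        - key: example.com/dedicated     # hypothetical taint key
          operator: Equal
          value: backups
          effect: NoSchedule
        ttlSecondsAfterFinished: 600     # finished backup Jobs are cleaned up after 10 minutes (minimum 60)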
Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' - minLength: 6 - type: string - full: - description: 'Defines the Cron schedule for a full - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' - minLength: 6 - type: string - incremental: - description: 'Defines the Cron schedule for an incremental - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' - minLength: 6 - type: string - type: object - volume: - description: Represents a pgBackRest repository that - is created using a PersistentVolumeClaim - properties: - volumeClaimSpec: - description: Defines a PersistentVolumeClaim spec - used to create and/or bind a volume - properties: - accessModes: - description: 'AccessModes contains the desired - access modes the volume should have. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify - either: * An existing VolumeSnapshot object - (snapshot.storage.k8s.io/VolumeSnapshot) * - An existing PVC (PersistentVolumeClaim) * - An existing custom resource that implements - data population (Alpha) In order to use custom - resource types that implement data population, - the AnyVolumeDataSource feature gate must - be enabled. If the provisioner or an external - controller can support the specified data - source, it will create a new volume based - on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the - resource being referenced. If APIGroup - is not specified, the specified Kind must - be in the core API group. For any other - third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource - being referenced - type: string - name: - description: Name is the name of resource - being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum - resources the volume should have. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum - amount of compute resources allowed. More - info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. - If Requests is omitted for a container, - it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider - for binding. 
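The repos entries being rearranged in this hunk keep their shape: a name matching ^repo[1-4], optional schedules, and one of azure, gcs, s3, or volume. A sketch of a single PVC-backed repository with scheduled backups; the cron expressions and storage size are assumptions.

spec:
  backups:
    pgbackrest:
      repos:
      - name: repo1
        schedules:
          full: "0 1 * * 0"              # weekly full backup
          differential: "0 1 * * 1-6"    # differential backups on the other days
        volume:
          volumeClaimSpec:
            accessModes:
            - ReadWriteOnce
            resources:
              requests:
                storage: 5Gi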
- properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required - by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of - volume is required by the claim. Value of - Filesystem is implied when not included in - claim spec. - type: string - volumeName: - description: VolumeName is the binding reference - to the PersistentVolume backing this claim. - type: string - type: object - required: - - volumeClaimSpec - type: object - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - restore: - description: Defines details for performing an in-place restore - using pgBackRest + type: array + repoName: + description: The name of the pgBackRest repo to run the + backup command against. + pattern: ^repo[1-4] + type: string + required: + - repoName + type: object + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + repoHost: + description: |- + Defines configuration for a pgBackRest dedicated repository host. This section is only + applicable if at least one "volume" (i.e. PVC-based) repository is defined in the "repos" + section, therefore enabling a dedicated repository host Deployment. properties: affinity: - description: 'Scheduling constraints of the pgBackRest - restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the Dedicated repo host pod. + Changing this value causes repo host to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. 
- The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) - with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling - term matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -1466,85 +1511,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in @@ -1556,112 +1588,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to an update), the system - may or may not try to eventually evict the pod - from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector - term matches no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. 
+ description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules @@ -1669,20 +1689,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. 
for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most - preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -1693,19 +1709,101 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1713,72 +1811,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies which - namespaces the labelSelector applies - to (matches against); null or empty - list means "this pod's namespace" + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -1786,41 +1873,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to a pod label update), - the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
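To make the preferred/required distinction described above concrete, here is a minimal, illustrative affinity stanza. It assumes the pgBackRest repo host section of a PostgresCluster spec and invented labels; only the scheduling semantics are the point, not a recommended layout.

```yaml
spec:
  backups:
    pgbackrest:
      repoHost:
        affinity:
          podAffinity:
            # Hard requirement: schedule only onto a node (same hostname)
            # that already runs a pod labeled app=cache. When several terms
            # are listed, all of them must be satisfied.
            requiredDuringSchedulingIgnoredDuringExecution:
              - labelSelector:
                  matchLabels:
                    app: cache                     # example label
                topologyKey: kubernetes.io/hostname
            # Soft preference: each satisfied term adds its weight (1-100)
            # to a node's score; the highest-scoring node wins, but the
            # scheduler may still violate the preference.
            preferredDuringSchedulingIgnoredDuringExecution:
              - weight: 50
                podAffinityTerm:
                  labelSelector:
                    matchLabels:
                      app: cache
                  topologyKey: topology.kubernetes.io/zone
```

The same affinity schema is repeated under other pod templates in this CRD, so the stanza would look identical elsewhere, only nested under a different parent field.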
items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1828,88 +1912,162 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object - namespaces: - description: namespaces specifies which - namespaces the labelSelector applies to - (matches against); null or empty list - means "this pod's namespace" + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array - topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling - rules (e.g. avoid putting this pod in the same node, - zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity - expressions specified by this field, but it - may choose a node that violates one or more - of the expressions. The node that is most preferred - is the one with the greatest sum of weights, - i.e. for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - anti-affinity expressions, etc.), compute a - sum by iterating through the elements of this - field and adding "weight" to the sum if the - node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest - sum are the most preferred. - items: - description: The weights of all of the matched - WeightedPodAffinityTerm fields are added per-node + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. 
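The namespaces and namespaceSelector fields described above are combined as a union. A rough sketch of a single affinity term, with invented namespace names and labels:

```yaml
# One pod (anti)affinity term. The term applies to the union of:
#   * namespaces listed explicitly in `namespaces`, and
#   * namespaces matched by `namespaceSelector`.
# Leaving both null/empty means "this pod's namespace"; an empty
# selector ({}) would match every namespace.
- labelSelector:
    matchLabels:
      app: hippo                     # example pod label
  namespaces:
    - postgres-operator              # example namespace name
  namespaceSelector:
    matchLabels:
      environment: production        # example namespace label
  topologyKey: kubernetes.io/hostname
```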
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same node, + zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) properties: podAffinityTerm: @@ -1917,19 +2075,101 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
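For the podAntiAffinity block introduced above, a hedged example of a weighted (preferred) anti-affinity term, again assuming the repo host template and example labels:

```yaml
podAntiAffinity:
  preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 100                    # 1-100; higher per-node sums are preferred
      podAffinityTerm:
        labelSelector:
          matchLabels:
            postgres-operator.crunchydata.com/cluster: hippo   # example label
        # "Co-located" means running on nodes that share the same value of
        # this label key, so this prefers spreading across hostnames.
        topologyKey: kubernetes.io/hostname
```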
+ properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
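The matchLabelKeys and mismatchLabelKeys fields repeated throughout this schema are alpha and only take effect when the MatchLabelKeysInPodAffinity feature gate is enabled; the label keys below are hypothetical and shown only to sketch how the merge works.

```yaml
# The value of each key in matchLabelKeys is read from the *incoming*
# pod's labels and merged into labelSelector as `key in (value)`;
# mismatchLabelKeys merges `key notin (value)` instead. A key may not
# appear both here and in labelSelector, and these lists require
# labelSelector to be set.
- labelSelector:
    matchLabels:
      app: hippo                     # example label
  matchLabelKeys:
    - pod-template-hash              # e.g. keep affinity within one rollout
  topologyKey: kubernetes.io/hostname
```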
items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1937,72 +2177,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies which - namespaces the labelSelector applies - to (matches against); null or empty - list means "this pod's namespace" + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. 
+ description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -2010,41 +2239,121 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements - specified by this field are not met at scheduling - time, the pod will not be scheduled onto the - node. If the anti-affinity requirements specified - by this field cease to be met at some point - during pod execution (e.g. due to a pod label - update), the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -2052,100 +2361,97 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies which - namespaces the labelSelector applies to - (matches against); null or empty list - means "this pod's namespace" + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object - clusterName: - description: The name of an existing PostgresCluster to - use as the data source for the new PostgresCluster. - Defaults to the name of the PostgresCluster being created - if not provided. - type: string - clusterNamespace: - description: The namespace of the cluster specified as - the data source using the clusterName field. Defaults - to the namespace of the PostgresCluster being created - if not provided. - type: string - enabled: - default: false - description: Whether or not in-place pgBackRest restores - are enabled for this PostgresCluster. - type: boolean - options: - description: Command line options to include when running - the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore - items: - type: string - type: array - repoName: - description: The name of the pgBackRest repo within the - source PostgresCluster that contains the backups that - should be utilized to perform a pgBackRest restore when - initializing the data source for the new PostgresCluster. - pattern: ^repo[1-4] + priorityClassName: + description: |- + Priority class name for the pgBackRest repo host pod. 
Changing this value + causes PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string resources: - description: Resource requirements for the pgBackRest - restore Job. + description: Resource requirements for a pgBackRest repository + host properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -2153,8 +2459,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -2163,2317 +2470,12566 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object + sshConfigMap: + description: |- + ConfigMap containing custom SSH configuration. + Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. 
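Pulling the repo host fields above together, a minimal sketch of resource requests/limits and a priority class for the pgBackRest repository host; the PriorityClass name is hypothetical and would need to exist in the cluster already.

```yaml
spec:
  backups:
    pgbackrest:
      repoHost:
        # Changing the priority class causes the pod to restart.
        priorityClassName: high-priority       # hypothetical PriorityClass
        resources:
          requests:                             # what the scheduler reserves
            cpu: 100m
            memory: 256Mi
          limits:                               # hard cap; requests cannot exceed limits
            cpu: "1"
            memory: 512Mi
```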
+ type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + sshSecret: + description: |- + Secret containing custom SSH keys. + Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic tolerations: - description: 'Tolerations of the pgBackRest restore Job. 
- More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of a PgBackRest repo host pod. Changing this value causes a restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to - tolerates any taint that matches the triple - using the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to - match. Empty means match all taint effects. When - specified, allowed values are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; - this combination means to match all values and - all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints - of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect - NoExecute, otherwise this field is ignored) tolerates - the taint. By default, it is not set, which means - tolerate the taint forever (do not evict). Zero - and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value - should be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array - required: - - enabled - - repoName + topologySpreadConstraints: + description: |- + Topology spread constraints of a Dedicated repo host pod. Changing this + value causes the repo host to restart. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how + to spread matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. 
+ Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. 
+ When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. 
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array type: object - type: object - required: - - pgbackrest - type: object - customReplicationTLSSecret: - description: 'The secret containing the replication client certificates - and keys for secure connections to the PostgreSQL server. It will - need to contain the client TLS certificate, TLS key and the Certificate - Authority certificate with the data keys set to tls.crt, tls.key - and ca.crt, respectively. NOTE: If CustomReplicationClientTLSSecret - is provided, CustomTLSSecret MUST be provided and the ca.crt provided - must be the same.' - properties: - items: - description: If unspecified, each key-value pair in the Data field - of the referenced Secret will be projected into the volume as - a file whose name is the key and content is the value. If specified, - the listed keys will be projected into the specified paths, - and unlisted keys will not be present. If a key is specified - which is not present in the Secret, the volume setup will error - unless it is marked optional. Paths must be relative and may - not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits used to set permissions - on this file. Must be an octal value between 0000 and - 0777 or a decimal value between 0 and 511. YAML accepts - both octal and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume defaultMode - will be used. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the key - to. May not be an absolute path. May not contain the path - element '..'. May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - type: object - customTLSSecret: - description: 'The secret containing the Certificates and Keys to encrypt - PostgreSQL traffic will need to contain the server TLS certificate, - TLS key and the Certificate Authority certificate with the data - keys set to tls.crt, tls.key and ca.crt, respectively. It will then - be mounted as a volume projection to the ''/pgconf/tls'' directory. - For more information on Kubernetes secret projections, please see - https://k8s.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths - NOTE: If CustomTLSSecret is provided, CustomReplicationClientTLSSecret - MUST be provided and the ca.crt provided must be the same.' 
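As a worked example of the tolerations and topology spread constraints described above for the dedicated repo host (the taint key, labels, and zone topology are invented):

```yaml
spec:
  backups:
    pgbackrest:
      repoHost:
        tolerations:
          # Tolerate a hypothetical taint so the repo host may land on
          # nodes reserved for backup workloads.
          - key: node-role/backups      # hypothetical taint key
            operator: Exists            # matches the taint regardless of value
            effect: NoSchedule
        topologySpreadConstraints:
          # Keep matching pods per zone within maxSkew of the global
          # minimum; DoNotSchedule makes the constraint a hard rule,
          # ScheduleAnyway would make it a preference.
          - maxSkew: 1
            topologyKey: topology.kubernetes.io/zone
            whenUnsatisfiable: DoNotSchedule
            labelSelector:
              matchLabels:
                postgres-operator.crunchydata.com/cluster: hippo   # example label
```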
- properties: - items: - description: If unspecified, each key-value pair in the Data field - of the referenced Secret will be projected into the volume as - a file whose name is the key and content is the value. If specified, - the listed keys will be projected into the specified paths, - and unlisted keys will not be present. If a key is specified - which is not present in the Secret, the volume setup will error - unless it is marked optional. Paths must be relative and may - not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits used to set permissions - on this file. Must be an octal value between 0000 and - 0777 or a decimal value between 0 and 511. YAML accepts - both octal and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume defaultMode - will be used. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the key - to. May not be an absolute path. May not contain the path - element '..'. May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - type: object - dataSource: - description: Specifies a data source for bootstrapping the PostgreSQL - cluster. - properties: - postgresCluster: - description: Defines a pgBackRest data source that can be used - to pre-populate the PostgreSQL data directory for a new PostgreSQL - cluster using a pgBackRest restore. - properties: - affinity: - description: 'Scheduling constraints of the pgBackRest restore - Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' - properties: - nodeAffinity: - description: Describes node affinity scheduling rules - for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 (i.e. - it's a no-op). A null preferred scheduling term - matches no objects (i.e. is also a no-op). + repos: + description: Defines a pgBackRest repository + items: + description: PGBackRestRepo represents a pgBackRest repository. Only + one of its members may be specified. 
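Since each PGBackRestRepo entry may set only one storage member, here is a sketch of two repositories, one backed by a PersistentVolumeClaim and one by S3; the bucket, endpoint, and region values are placeholders.

```yaml
spec:
  backups:
    pgbackrest:
      repos:
        - name: repo1                   # must match ^repo[1-4]
          volume:                       # exactly one of volume/azure/gcs/s3 per repo
            volumeClaimSpec:
              accessModes: ["ReadWriteOnce"]
              resources:
                requests:
                  storage: 10Gi
        - name: repo2
          s3:                           # a second repo may use different storage
            bucket: my-backup-bucket    # placeholder values
            endpoint: s3.us-east-1.amazonaws.com
            region: us-east-1
```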
+ properties: + azure: + description: Represents a pgBackRest repository that + is created using Azure storage + properties: + container: + description: The Azure container utilized for the + repository + type: string + required: + - container + type: object + gcs: + description: Represents a pgBackRest repository that + is created using Google Cloud Storage + properties: + bucket: + description: The GCS bucket utilized for the repository + type: string + required: + - bucket + type: object + name: + description: The name of the repository + pattern: ^repo[1-4] + type: string + s3: + description: |- + RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) + storage + properties: + bucket: + description: The S3 bucket utilized for the repository + type: string + endpoint: + description: A valid endpoint corresponding to the + specified region + type: string + region: + description: The region corresponding to the S3 + bucket + type: string + required: + - bucket + - endpoint + - region + type: object + schedules: + description: |- + Defines the schedules for the pgBackRest backups + Full, Differential and Incremental backup types are supported: + https://pgbackrest.org/user-guide.html#concept/backup + properties: + differential: + description: |- + Defines the Cron schedule for a differential pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax + minLength: 6 + type: string + full: + description: |- + Defines the Cron schedule for a full pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax + minLength: 6 + type: string + incremental: + description: |- + Defines the Cron schedule for an incremental pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax + minLength: 6 + type: string + type: object + volume: + description: Represents a pgBackRest repository that + is created using a PersistentVolumeClaim + properties: + volumeClaimSpec: + description: Defines a PersistentVolumeClaim spec + used to create and/or bind a volume properties: - preference: - description: A node selector term, associated - with the corresponding weight. + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. 
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. properties: matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: - description: The label key that the - selector applies to. + description: key is the label key + that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. 
This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object type: object - weight: - description: Weight associated with matching - the corresponding nodeSelectorTerm, in the - range 1-100. - format: int32 - type: integer - required: - - preference - - weight + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to an update), the system may or may not try - to eventually evict the pod from its node. 
+ x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) + > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) + required: + - volumeClaimSpec + type: object + required: + - name + type: object + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restore: + description: Defines details for performing an in-place restore + using pgBackRest + properties: + affinity: + description: |- + Scheduling constraints of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. properties: - nodeSelectorTerms: - description: Required. A list of node selector - terms. The terms are ORed. + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. 
- type: string - values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. - co-locate this pod in the same node, zone, etc. as some - other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, - associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of - resources, in this case pods. + preference: + description: A node selector term, associated + with the corresponding weight. properties: matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. + description: A list of node selector + requirements by node's labels. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: - description: key is the label - key that the selector applies - to. + description: The label key that + the selector applies to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. - type: object + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic type: object - namespaces: - description: namespaces specifies which - namespaces the labelSelector applies to - (matches against); null or empty list - means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. - type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in + the range 1-100. + format: int32 + type: integer required: - - topologyKey - type: object - weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to a pod label update), the system may or may - not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. 
- items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object + - preference + - weight type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules - (e.g. avoid putting this pod in the same node, zone, - etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to - the sum if the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. 
- items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, - associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of - resources, in this case pods. + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. + description: A list of node selector + requirements by node's labels. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: - description: key is the label - key that the selector applies - to. + description: The label key that + the selector applies to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. - type: object + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. 
+ properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic type: object - namespaces: - description: namespaces specifies which - namespaces the labelSelector applies to - (matches against); null or empty list - means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - anti-affinity requirements specified by this field - cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may - or may not try to eventually evict the pod from - its node. When there are multiple elements, the - lists of nodes corresponding to each podAffinityTerm - are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules + (e.g. co-locate this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - clusterName: - description: The name of an existing PostgresCluster to use - as the data source for the new PostgresCluster. Defaults - to the name of the PostgresCluster being created if not - provided. - type: string - clusterNamespace: - description: The namespace of the cluster specified as the - data source using the clusterName field. Defaults to the - namespace of the PostgresCluster being created if not provided. - type: string - options: - description: Command line options to include when running - the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore - items: - type: string - type: array - repoName: - description: The name of the pgBackRest repo within the source - PostgresCluster that contains the backups that should be - utilized to perform a pgBackRest restore when initializing - the data source for the new PostgresCluster. - pattern: ^repo[1-4] - type: string - resources: - description: Resource requirements for the pgBackRest restore - Job. 
- properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - tolerations: - description: 'Tolerations of the pgBackRest restore Job. More - info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' - items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. - type: string - key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value should - be empty, otherwise just a regular string. - type: string - type: object - type: array - required: - - repoName - type: object - type: object - image: - description: The image name to use for PostgreSQL containers. When - omitted, the value comes from an operator environment variable. - For standard PostgreSQL images, the format is RELATED_IMAGE_POSTGRES_{postgresVersion}, - e.g. RELATED_IMAGE_POSTGRES_13. For PostGIS enabled PostgreSQL images, - the format is RELATED_IMAGE_POSTGRES_{postgresVersion}_GIS_{postGISVersion}, - e.g. RELATED_IMAGE_POSTGRES_13_GIS_3.1. - type: string - imagePullSecrets: - description: The image pull secrets used to pull from a private registry - Changing this value causes all running pods to restart. 
https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - items: - properties: - affinity: - description: 'Scheduling constraints of a PostgreSQL pod. Changing - this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for - the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods - to nodes that satisfy the affinity expressions specified - by this field, but it may choose a node that violates - one or more of the expressions. The node that is most - preferred is the one with the greatest sum of weights, - i.e. for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating - through the elements of this field and adding "weight" - to the sum if the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a - no-op). A null preferred scheduling term matches - no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated - with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of string values. - If the operator is In or NotIn, the - values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. If - the operator is Gt or Lt, the values - array must have a single element, - which will be interpreted as an integer. - This array is replaced during a strategic - merge patch. + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of string values. - If the operator is In or NotIn, the - values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. If - the operator is Gt or Lt, the values - array must have a single element, - which will be interpreted as an integer. - This array is replaced during a strategic - merge patch. + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the - corresponding nodeSelectorTerm, in the range - 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the affinity - requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an - update), the system may or may not try to eventually - evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of string values. - If the operator is In or NotIn, the - values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. If - the operator is Gt or Lt, the values - array must have a single element, - which will be interpreted as an integer. - This array is replaced during a strategic - merge patch. + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string required: - - key - - operator + - topologyKey type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of string values. - If the operator is In or NotIn, the - values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. If - the operator is Gt or Lt, the values - array must have a single element, - which will be interpreted as an integer. - This array is replaced during a strategic - merge patch. + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. 
items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object type: array - required: - - key - - operator + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. - co-locate this pod in the same node, zone, etc. as some - other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods - to nodes that satisfy the affinity expressions specified - by this field, but it may choose a node that violates - one or more of the expressions. The node that is most - preferred is the one with the greatest sum of weights, - i.e. for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating - through the elements of this field and adding "weight" - to the sum if the node has pods which matches the - corresponding podAffinityTerm; the node(s) with the - highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same node, + zone, etc. as some other pod(s)). properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. 
- properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. This - array is replaced during a strategic - merge patch. + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object type: array - required: - - key - - operator + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose value - of the label with key topologyKey matches - that of any node on which any of the selected - pods is running. Empty topologyKey is not - allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the - corresponding podAffinityTerm, in the range - 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the affinity - requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a - pod label update), the system may or may not try to - eventually evict the pod from its node. When there - are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all - terms must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or - not co-located (anti-affinity) with, where co-located - is defined as running on a node whose value of the - label with key matches that of any - node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
items: type: string type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string required: - - key - - operator + - topologyKey type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only - "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified - namespaces, where co-located is defined as running - on a node whose value of the label with key - topologyKey matches that of any node on which - any of the selected pods is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules - (e.g. avoid putting this pod in the same node, zone, etc. - as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods - to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the greatest - sum of weights, i.e. for each node that meets all - of the scheduling requirements (resource request, - requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if the - node has pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents - a key's relationship to a set - of values. 
Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. This - array is replaced during a strategic - merge patch. - items: + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. type: string - type: array - required: - - key - - operator + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose value - of the label with key topologyKey matches - that of any node on which any of the selected - pods is running. Empty topologyKey is not - allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the - corresponding podAffinityTerm, in the range - 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the anti-affinity - requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a - pod label update), the system may or may not try to - eventually evict the pod from its node. When there - are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all - terms must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or - not co-located (anti-affinity) with, where co-located - is defined as running on a node whose value of the - label with key matches that of any - node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. items: - type: string + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object type: array - required: - - key - - operator + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object type: object - type: array - matchLabels: - additionalProperties: + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. 
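The matchLabelKeys and mismatchLabelKeys fields documented above are recent alpha additions (behind the MatchLabelKeysInPodAffinity feature gate) to the standard pod (anti-)affinity schema that this CRD embeds; the same corev1.Affinity structure is also exposed for instance sets. A minimal spec fragment using matchLabelKeys in a required anti-affinity term, with illustrative label keys and values:

spec:
  instances:
  - name: instance1
    replicas: 2
    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/instance-set: instance1
          # Merge the incoming pod's own cluster label into the selector,
          # so the term means "pods of the same cluster" without hard-coding the name.
          matchLabelKeys:
          - postgres-operator.crunchydata.com/cluster
          topologyKey: kubernetes.io/hostname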
+ Empty topologyKey is not allowed. type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only - "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string + required: + - topologyKey + type: object type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified - namespaces, where co-located is defined as running - on a node whose value of the label with key - topologyKey matches that of any node on which - any of the selected pods is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey + x-kubernetes-list-type: atomic type: object - type: array - type: object - type: object - dataVolumeClaimSpec: - description: 'Defines a PersistentVolumeClaim for PostgreSQL - data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes' - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: + type: object + clusterName: + description: |- + The name of an existing PostgresCluster to use as the data source for the new PostgresCluster. + Defaults to the name of the PostgresCluster being created if not provided. type: string - type: array - dataSource: - description: 'This field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) * An existing - custom resource that implements data population (Alpha) - In order to use custom resource types that implement data - population, the AnyVolumeDataSource feature gate must - be enabled. If the provisioner or an external controller - can support the specified data source, it will create - a new volume based on the contents of the specified data - source.' - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced + clusterNamespace: + description: |- + The namespace of the cluster specified as the data source using the clusterName field. + Defaults to the namespace of the PostgresCluster being created if not provided. + type: string + enabled: + default: false + description: Whether or not in-place pgBackRest restores + are enabled for this PostgresCluster. + type: boolean + options: + description: |- + Command line options to include when running the pgBackRest restore command. + https://pgbackrest.org/command.html#command-restore + items: type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for - binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not - included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the - PersistentVolume backing this claim. - type: string - type: object - metadata: - description: Metadata contains metadata for PostgresCluster - resources - properties: - annotations: - additionalProperties: + type: array + priorityClassName: + description: |- + Priority class name for the pgBackRest restore Job pod. Changing this + value causes PostgreSQL to restart. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string - type: object - labels: - additionalProperties: + repoName: + description: |- + The name of the pgBackRest repo within the source PostgresCluster that contains the backups + that should be utilized to perform a pgBackRest restore when initializing the data source + for the new PostgresCluster. + pattern: ^repo[1-4] type: string - type: object - type: object - name: - default: "" - type: string - replicas: - default: 1 - format: int32 - minimum: 1 - type: integer - resources: - description: Compute resources of a PostgreSQL container. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - tolerations: - description: 'Tolerations of a PostgreSQL pod. Changing this - value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' - items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + resources: + description: Resource requirements for the pgBackRest + restore Job. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tolerations: + description: |- + Tolerations of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + required: + - enabled + - repoName + type: object + sidecars: + description: Configuration for pgBackRest sidecar containers properties: - effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. - type: string - key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). 
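Together, the restore fields above (enabled, repoName, options, priorityClassName, resources, tolerations, plus the optional clusterName/clusterNamespace source) describe an in-place pgBackRest restore. A minimal sketch of a point-in-time restore from repo1; the cluster name, storage sizes, and restore target are illustrative:

apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo
spec:
  postgresVersion: 15
  instances:
  - dataVolumeClaimSpec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi
  backups:
    pgbackrest:
      repos:
      - name: repo1
        volume:
          volumeClaimSpec:
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: 1Gi
      restore:
        enabled: true
        repoName: repo1
        options:
        - '--type=time'
        - '--target=2024-01-01 12:00:00+00'

In PGO 5.x, actually triggering the restore additionally involves annotating the cluster (postgres-operator.crunchydata.com/pgbackrest-restore); that mechanism sits outside this schema.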
Zero and negative values - will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. - type: string + pgbackrest: + description: Defines the configuration for the pgBackRest + sidecar container + properties: + resources: + description: Resource requirements for a sidecar container + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + type: object + pgbackrestConfig: + description: Defines the configuration for the pgBackRest + config sidecar container + properties: + resources: + description: Resource requirements for a sidecar container + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + type: object type: object - type: array - walVolumeClaimSpec: - description: 'Defines a separate PersistentVolumeClaim for PostgreSQL''s - write-ahead log. More info: https://www.postgresql.org/docs/current/wal.html' + required: + - repos + type: object + snapshots: + description: VolumeSnapshot configuration + properties: + volumeSnapshotClassName: + description: Name of the VolumeSnapshotClass that should be + used by VolumeSnapshots + minLength: 1 + type: string + required: + - volumeSnapshotClassName + type: object + type: object + config: + properties: + files: + items: + description: Projection that may be projected along with other + supported volume types properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) * An existing - custom resource that implements data population (Alpha) - In order to use custom resource types that implement data - population, the AnyVolumeDataSource feature gate must - be enabled. If the provisioner or an external controller - can support the specified data source, it will create - a new volume based on the contents of the specified data - source.' + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. 
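The sidecars block above sets per-container resource requirements for the pgBackRest sidecar containers, and the snapshots block (a sibling of pgbackrest under backups in this schema) names the VolumeSnapshotClass the operator should use when it creates VolumeSnapshots. A spec fragment combining the two; the resource values are illustrative and the named class must already exist in the cluster:

spec:
  backups:
    pgbackrest:
      sidecars:
        pgbackrest:
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
        pgbackrestConfig:
          resources:
            requests:
              cpu: 50m
              memory: 64Mi
    snapshots:
      volumeSnapshotClassName: csi-hostpath-snapclass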
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. type: string - kind: - description: Kind is the type of resource being referenced + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. type: string - name: - description: Name is the name of resource being referenced + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. type: string required: - - kind - - name + - path type: object - resources: - description: 'Resources represents the minimum resources - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + configMap: + description: configMap information about the configMap data + to project properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean type: object - selector: - description: A label query over volumes to consider for - binding. + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. 
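The files list above accepts standard Kubernetes volume projections (configMap, secret, downwardAPI, serviceAccountToken), letting additional configuration files be projected into the Postgres pods; the mount location is managed by the operator. A spec fragment projecting one ConfigMap key and an optional Secret, with illustrative object names, assuming this config block sits at the top level of the cluster spec as the surrounding schema suggests:

spec:
  config:
    files:
    - configMap:
        name: hippo-extra-config
        items:
        - key: custom.conf
          path: custom.conf
    - secret:
        name: hippo-extra-secrets
        optional: true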
A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + customReplicationTLSSecret: + description: |- + The secret containing the replication client certificates and keys for + secure connections to the PostgreSQL server. It will need to contain the + client TLS certificate, TLS key and the Certificate Authority certificate + with the data keys set to tls.crt, tls.key and ca.crt, respectively. + NOTE: If CustomReplicationClientTLSSecret is provided, CustomTLSSecret + MUST be provided and the ca.crt provided must be the same. + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret or its + key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + customTLSSecret: + description: |- + The secret containing the Certificates and Keys to encrypt PostgreSQL + traffic will need to contain the server TLS certificate, TLS key and the + Certificate Authority certificate with the data keys set to tls.crt, + tls.key and ca.crt, respectively. It will then be mounted as a volume + projection to the '/pgconf/tls' directory. For more information on + Kubernetes secret projections, please see + https://k8s.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths + NOTE: If CustomTLSSecret is provided, CustomReplicationClientTLSSecret + MUST be provided and the ca.crt provided must be the same. + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret or its + key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + dataSource: + description: Specifies a data source for bootstrapping the PostgreSQL + cluster. + properties: + pgbackrest: + description: |- + Defines a pgBackRest cloud-based data source that can be used to pre-populate the + PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. + The PGBackRest field is incompatible with the PostgresCluster field: only one + data source can be used for pre-populating a new PostgreSQL cluster + properties: + affinity: + description: |- + Scheduling constraints of the pgBackRest restore Job. 
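The customTLSSecret and customReplicationTLSSecret fields above each take a Secret projection. A minimal sketch of wiring them into a PostgresCluster spec, assuming two pre-existing Secrets (the names here are placeholders) that each carry tls.crt, tls.key, and the same ca.crt:

```yaml
# Sketch: PostgresCluster referencing custom server and replication TLS Secrets.
# Secret names, cluster name, and storage sizes are illustrative; both Secrets
# must provide tls.crt, tls.key, and ca.crt, and the ca.crt must match.
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo
spec:
  postgresVersion: 15
  customTLSSecret:
    name: hippo-server-tls          # mounted as a projection under /pgconf/tls
  customReplicationTLSSecret:
    name: hippo-replication-tls     # client certificate used for replication
  instances:
    - name: instance1
      dataVolumeClaimSpec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi
  backups:
    pgbackrest:
      repos:
        - name: repo1
          volume:
            volumeClaimSpec:
              accessModes: ["ReadWriteOnce"]
              resources:
                requests:
                  storage: 1Gi
```

Per the notes above, setting either of these fields requires also setting the other.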
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). 
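The node selector requirements documented above follow the standard Kubernetes NodeAffinity shape (key, operator, values). A sketch of pinning the pgBackRest restore Job to labeled nodes, where the disktype label and its value are assumptions rather than anything the operator defines:

```yaml
# Sketch: node affinity for the restore Job (dataSource.pgbackrest.affinity).
# The disktype label is a hypothetical node label.
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: disktype
              operator: In          # one of In, NotIn, Exists, DoesNotExist, Gt, Lt
              values:
                - ssd
```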
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + configuration: + description: |- + Projected volumes containing custom pgBackRest configuration. These files are mounted + under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the + PostgreSQL Operator: + https://pgbackrest.org/configuration.html + items: + description: Projection that may be projected along with + other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. 
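With the full affinity block above in mind, a soft anti-affinity rule for the restore Job could look like the sketch below; the weight, label selector, and topology key are illustrative choices, not defaults:

```yaml
# Sketch: preferred (soft) pod anti-affinity for the restore Job.
# The label selector and topology key are placeholders.
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 50                       # must fall in the range 1-100
        podAffinityTerm:
          labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/cluster: hippo
          topologyKey: kubernetes.io/hostname
```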
+ type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + global: + additionalProperties: + type: string + description: |- + Global pgBackRest configuration settings. These settings are included in the "global" + section of the pgBackRest configuration generated by the PostgreSQL Operator, and then + mounted under "/etc/pgbackrest/conf.d": + https://pgbackrest.org/configuration.html + type: object + options: + description: |- + Command line options to include when running the pgBackRest restore command. + https://pgbackrest.org/command.html#command-restore + items: + type: string + type: array + priorityClassName: + description: |- + Priority class name for the pgBackRest restore Job pod. Changing this + value causes PostgreSQL to restart. 
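The configuration, global, and options fields above are how extra pgBackRest settings reach the restore: projected files land under /etc/pgbackrest/conf.d, global entries go into the generated configuration, and options are appended to the restore command line. A sketch, assuming a hypothetical Secret named pgo-s3-creds that holds repository credentials (the required repo field is omitted here; a fuller restore sketch follows the repo schema below):

```yaml
# Sketch: pgBackRest restore settings on dataSource.pgbackrest.
# The Secret name, path, and point-in-time target are assumptions.
dataSource:
  pgbackrest:
    stanza: db
    # repo: is also required; see the fuller sketch after the repo schema below.
    configuration:
      - secret:
          name: pgo-s3-creds                   # hypothetical credentials Secret
    global:
      repo1-path: /pgbackrest/hippo/repo1      # example pgBackRest setting
    options:
      - --type=time
      - --target="2024-01-01 00:00:00+00"      # illustrative recovery target
```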
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + type: string + repo: + description: Defines a pgBackRest repository + properties: + azure: + description: Represents a pgBackRest repository that is + created using Azure storage + properties: + container: + description: The Azure container utilized for the + repository + type: string + required: + - container + type: object + gcs: + description: Represents a pgBackRest repository that is + created using Google Cloud Storage + properties: + bucket: + description: The GCS bucket utilized for the repository + type: string + required: + - bucket + type: object + name: + description: The name of the repository + pattern: ^repo[1-4] + type: string + s3: + description: |- + RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) + storage + properties: + bucket: + description: The S3 bucket utilized for the repository + type: string + endpoint: + description: A valid endpoint corresponding to the + specified region + type: string + region: + description: The region corresponding to the S3 bucket + type: string + required: + - bucket + - endpoint + - region + type: object + schedules: + description: |- + Defines the schedules for the pgBackRest backups + Full, Differential and Incremental backup types are supported: + https://pgbackrest.org/user-guide.html#concept/backup + properties: + differential: + description: |- + Defines the Cron schedule for a differential pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax + minLength: 6 + type: string + full: + description: |- + Defines the Cron schedule for a full pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax + minLength: 6 + type: string + incremental: + description: |- + Defines the Cron schedule for an incremental pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax + minLength: 6 + type: string + type: object + volume: + description: Represents a pgBackRest repository that is + created using a PersistentVolumeClaim + properties: + volumeClaimSpec: + description: Defines a PersistentVolumeClaim spec + used to create and/or bind a volume + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. 
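The schedules block above uses standard Cron syntax and is part of the shared repo definition; in practice it is usually set on repos under spec.backups.pgbackrest rather than on a restore data source. Purely to illustrate the syntax (values are examples):

```yaml
# Sketch: Cron-style backup schedules on a pgBackRest repo definition.
schedules:
  full: "0 1 * * 0"            # weekly full backup, Sundays at 01:00
  differential: "0 1 * * 1-6"  # daily differential on the remaining days
  incremental: "0 */4 * * *"   # incremental every four hours
```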
+ If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) + > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) + required: + - volumeClaimSpec + type: object + required: + - name + type: object + resources: + description: Resource requirements for the pgBackRest restore + Job. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + stanza: + default: db + description: |- + The name of an existing pgBackRest stanza to use as the data source for the new PostgresCluster. + Defaults to `db` if not provided. + type: string + tolerations: + description: |- + Tolerations of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. 
Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + required: + - repo + - stanza + type: object + postgresCluster: + description: |- + Defines a pgBackRest data source that can be used to pre-populate the PostgreSQL data + directory for a new PostgreSQL cluster using a pgBackRest restore. + The PGBackRest field is incompatible with the PostgresCluster field: only one + data source can be used for pre-populating a new PostgreSQL cluster + properties: + affinity: + description: |- + Scheduling constraints of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
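# Example sketch (editorial): the pgBackRest data source fields above -- repo,
# stanza, tolerations, and the repo volume's volumeClaimSpec with its
# "missing accessModes" / "missing storage request" validations -- combine into a
# restore-based bootstrap roughly as follows. The spec.dataSource.pgbackrest path,
# names, and sizes are illustrative assumptions, not taken from this diff.
spec:
  dataSource:
    pgbackrest:
      stanza: db                         # defaults to "db" when omitted
      repo:
        name: repo1
        volume:
          volumeClaimSpec:
            accessModes:                 # required by the "missing accessModes" rule
            - ReadWriteOnce
            storageClassName: fast-ssd   # assumed StorageClass name
            resources:
              requests:
                storage: 1Gi             # required by the "missing storage request" rule
      tolerations:                       # let the restore Job run on a tainted node pool
      - key: dedicated                   # assumed taint key/value
        operator: Equal
        value: postgres
        effect: NoSchedule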
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
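# Example sketch (editorial): the block above is the standard Kubernetes
# NodeAffinity schema. A required term that pins the restore Job to nodes carrying
# an assumed label might look like this (the label key and value are placeholders):
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
      - matchExpressions:
        - key: node-role.example.com/postgres   # assumed label key
          operator: In
          values:
          - "true"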
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
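# Example sketch (editorial): a preferred (weighted) pod-affinity term per the
# schema above -- a weight in the range 1-100, a labelSelector, and a mandatory
# topologyKey. The label and the zone topology key are illustrative assumptions.
affinity:
  podAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 50
      podAffinityTerm:
        labelSelector:
          matchLabels:
            app.kubernetes.io/name: example-app   # assumed label
        topologyKey: topology.kubernetes.io/zone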
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
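# Example sketch (editorial): podAntiAffinity is typically used to spread database
# pods across failure domains. A preferred anti-affinity term keyed on the node
# hostname, selecting pods by an assumed cluster label, could look like:
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 100
      podAffinityTerm:
        labelSelector:
          matchLabels:
            postgres-operator.crunchydata.com/cluster: hippo   # assumed label/value
        topologyKey: kubernetes.io/hostname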
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + clusterName: + description: |- + The name of an existing PostgresCluster to use as the data source for the new PostgresCluster. + Defaults to the name of the PostgresCluster being created if not provided. + type: string + clusterNamespace: + description: |- + The namespace of the cluster specified as the data source using the clusterName field. + Defaults to the namespace of the PostgresCluster being created if not provided. + type: string + options: + description: |- + Command line options to include when running the pgBackRest restore command. + https://pgbackrest.org/command.html#command-restore + items: + type: string + type: array + priorityClassName: + description: |- + Priority class name for the pgBackRest restore Job pod. Changing this + value causes PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + type: string + repoName: + description: |- + The name of the pgBackRest repo within the source PostgresCluster that contains the backups + that should be utilized to perform a pgBackRest restore when initializing the data source + for the new PostgresCluster. + pattern: ^repo[1-4] + type: string + resources: + description: Resource requirements for the pgBackRest restore + Job. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tolerations: + description: |- + Tolerations of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + required: + - repoName + type: object + volumes: + description: Defines any existing volumes to reuse for this PostgresCluster. + properties: + pgBackRestVolume: + description: |- + Defines the existing pgBackRest repo volume and directory to use in the + current PostgresCluster. + properties: + directory: + description: |- + The existing directory. When not set, a move Job is not created for the + associated volume. + type: string + pvcName: + description: The existing PVC name. + type: string + required: + - pvcName + type: object + pgDataVolume: + description: |- + Defines the existing pgData volume and directory to use in the current + PostgresCluster. 
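# Example sketch (editorial): the postgresCluster data source above clones a new
# cluster from an existing PostgresCluster's pgBackRest repository. The
# spec.dataSource.postgresCluster path and all names are illustrative; repoName
# must match the ^repo[1-4] pattern from the schema.
spec:
  dataSource:
    postgresCluster:
      clusterName: hippo                    # defaults to the new cluster's name
      clusterNamespace: postgres-operator   # defaults to the new cluster's namespace
      repoName: repo1
      options:                              # extra pgbackrest restore options
      - --type=time
      - --target="2024-01-01 00:00:00+00"
      priorityClassName: database-critical  # assumed PriorityClass name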
+ properties: + directory: + description: |- + The existing directory. When not set, a move Job is not created for the + associated volume. + type: string + pvcName: + description: The existing PVC name. + type: string + required: + - pvcName + type: object + pgWALVolume: + description: |- + Defines the existing pg_wal volume and directory to use in the current + PostgresCluster. Note that a defined pg_wal volume MUST be accompanied by + a pgData volume. + properties: + directory: + description: |- + The existing directory. When not set, a move Job is not created for the + associated volume. + type: string + pvcName: + description: The existing PVC name. + type: string + required: + - pvcName + type: object + type: object + type: object + databaseInitSQL: + description: |- + DatabaseInitSQL defines a ConfigMap containing custom SQL that will + be run after the cluster is initialized. This ConfigMap must be in the same + namespace as the cluster. + properties: + key: + description: Key is the ConfigMap data key that points to a SQL + string + type: string + name: + description: Name is the name of a ConfigMap + type: string + required: + - key + - name + type: object + disableDefaultPodScheduling: + description: |- + Whether or not the PostgreSQL cluster should use the defined default + scheduling constraints. If the field is unset or false, the default + scheduling constraints will be used in addition to any custom constraints + provided. + type: boolean + image: + description: |- + The image name to use for PostgreSQL containers. When omitted, the value + comes from an operator environment variable. For standard PostgreSQL images, + the format is RELATED_IMAGE_POSTGRES_{postgresVersion}, + e.g. RELATED_IMAGE_POSTGRES_13. For PostGIS enabled PostgreSQL images, + the format is RELATED_IMAGE_POSTGRES_{postgresVersion}_GIS_{postGISVersion}, + e.g. RELATED_IMAGE_POSTGRES_13_GIS_3.1. + type: string + imagePullPolicy: + description: |- + ImagePullPolicy is used to determine when Kubernetes will attempt to + pull (download) container images. + More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + enum: + - Always + - Never + - IfNotPresent + type: string + imagePullSecrets: + description: |- + The image pull secrets used to pull from a private registry + Changing this value causes all running pods to restart. + https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + instances: + description: |- + Specifies one or more sets of PostgreSQL pods that replicate data for + this cluster. + items: + properties: + affinity: + description: |- + Scheduling constraints of a PostgreSQL pod. Changing this value causes + PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. 
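# Example sketch (editorial): the volumes data source above adopts existing PVCs,
# for example when migrating an older cluster. The spec.dataSource.volumes path
# and the PVC/directory names are illustrative; omitting "directory" skips the
# move Job for that volume, and a pgWALVolume must be accompanied by a pgDataVolume.
spec:
  dataSource:
    volumes:
      pgDataVolume:
        pvcName: old-cluster-pgdata      # existing PVC holding the data directory
        directory: old-cluster           # existing directory inside that PVC
      pgWALVolume:
        pvcName: old-cluster-pgwal
        directory: old-cluster-wal
      pgBackRestVolume:
        pvcName: old-cluster-pgbr-repo
        directory: old-cluster-backrest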
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range + 1-100. 
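# Example sketch (editorial): the cluster-level fields described just before the
# instances schema (databaseInitSQL, image, imagePullPolicy, imagePullSecrets,
# disableDefaultPodScheduling) might be set as follows. The image reference,
# Secret, and ConfigMap names are assumptions; when image is omitted the operator
# falls back to the RELATED_IMAGE_POSTGRES_* environment variables noted above.
spec:
  image: registry.example.com/crunchy-postgres:ubi8-15   # assumed image reference
  imagePullPolicy: IfNotPresent                          # Always | Never | IfNotPresent
  imagePullSecrets:
  - name: private-registry-credentials                   # assumed Secret name
  databaseInitSQL:
    name: hippo-init-sql     # ConfigMap in the cluster's namespace
    key: init.sql            # data key containing the SQL to run after initialization
  disableDefaultPodScheduling: false                     # keep the default constraints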
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
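As a usage sketch for the pod anti-affinity terms described above (the podAntiAffinity block closes just below), again assuming an instance-set affinity field; the label selector shown is illustrative only:

# Hypothetical fragment: prefer spreading instance pods across nodes.
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname
          labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/cluster: hippo   # assumed label key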
+ type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + containers: + description: |- + Custom sidecars for PostgreSQL instance pods. Changing this value causes + PostgreSQL to restart. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
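The env entries above take either a literal value or a valueFrom source; a small sketch, with placeholder names for the Secret and variables:

# Hypothetical sidecar environment variables.
env:
  - name: LOG_LEVEL
    value: info
  - name: API_TOKEN
    valueFrom:
      secretKeyRef:
        name: sidecar-credentials     # placeholder Secret name
        key: token
  - name: POD_NAMESPACE
    valueFrom:
      fieldRef:
        fieldPath: metadata.namespace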
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a + GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. 
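A sketch of the port and liveness probe fields described above for a custom sidecar (the readinessProbe and startupProbe that follow have the same shape); the port number and path are illustrative:

# Hypothetical sidecar port and probe settings.
ports:
  - name: metrics
    containerPort: 9187
    protocol: TCP
livenessProbe:
  httpGet:
    path: /healthz
    port: metrics
  initialDelaySeconds: 10
  periodSeconds: 30
  failureThreshold: 3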
+ items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a + GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. 
+ type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. 
+ type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a + GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. 
The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. 
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + dataVolumeClaimSpec: + description: |- + Defines a PersistentVolumeClaim for PostgreSQL data. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. 
+ * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + minAvailable: + anyOf: + - type: integer + - type: string + description: |- + Minimum number of pods that should be available at a time. + Defaults to one when the replicas field is greater than one. + x-kubernetes-int-or-string: true + name: + default: "" + description: |- + Name that associates this set of PostgreSQL pods. This field is optional + when only one instance set is defined. Each instance set in a cluster + must have a unique name. The combined length of this and the cluster name + must be 46 characters or less. + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$ + type: string + priorityClassName: + description: |- + Priority class name for the PostgreSQL pod. Changing this value causes + PostgreSQL to restart. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + type: string + replicas: + default: 1 + description: Number of desired PostgreSQL pods. + format: int32 + minimum: 1 + type: integer + resources: + description: Compute resources of a PostgreSQL container. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + sidecars: + description: Configuration for instance sidecar containers + properties: + replicaCertCopy: + description: Defines the configuration for the replica cert + copy sidecar container + properties: + resources: + description: Resource requirements for a sidecar container + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + type: object + type: object + tablespaceVolumes: + description: |- + The list of tablespaces volumes to mount for this postgrescluster + This field requires enabling TablespaceVolumes feature gate + items: + properties: + dataVolumeClaimSpec: + description: |- + Defines a PersistentVolumeClaim for a tablespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. 
+ There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) + > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) + name: + description: |- + The name for the tablespace, used as the path name for the volume. + Must be unique in the instance set since they become the directory names. + minLength: 1 + pattern: ^[a-z][a-z0-9]*$ + type: string + required: + - dataVolumeClaimSpec + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + tolerations: + description: |- + Tolerations of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + Topology spread constraints of a PostgreSQL pod. Changing this value causes + PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. 
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walVolumeClaimSpec: + description: |- + Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. + More info: https://www.postgresql.org/docs/current/wal.html + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) + required: + - dataVolumeClaimSpec + type: object + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + monitoring: + description: The specification of monitoring tools that connect to + PostgreSQL + properties: + pgmonitor: + description: PGMonitorSpec defines the desired state of the pgMonitor + tool suite + properties: + exporter: + properties: + configuration: + description: |- + Projected volumes containing custom PostgreSQL Exporter configuration. Currently supports + the customization of PostgreSQL Exporter queries. If a "queries.yml" file is detected in + any volume projected using this field, it will be loaded using the "extend.query-path" flag: + https://github.com/prometheus-community/postgres_exporter#flags + Changing the values of field causes PostgreSQL and the exporter to restart. + items: + description: Projection that may be projected along + with other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. + Must not be absolute or contain the + ''..'' path. Must be utf-8 encoded. + The first item of the relative path + must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + customTLSSecret: + description: |- + Projected secret containing custom TLS certificates to encrypt output from the exporter + web server + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
+ items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + image: + description: |- + The image name to use for crunchy-postgres-exporter containers. The image may + also be set using the RELATED_IMAGE_PGEXPORTER environment variable. + type: string + resources: + description: |- + Changing this value causes PostgreSQL and the exporter to restart. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + type: object + type: object + type: object + openshift: + description: |- + Whether or not the PostgreSQL cluster is being deployed to an OpenShift + environment. If the field is unset, the operator will automatically + detect the environment. + type: boolean + patroni: + properties: + dynamicConfiguration: + description: |- + Patroni dynamic configuration settings. Changes to this value will be + automatically reloaded without validation. Changes to certain PostgreSQL + parameters cause PostgreSQL to restart. + More info: https://patroni.readthedocs.io/en/latest/dynamic_configuration.html + type: object + x-kubernetes-preserve-unknown-fields: true + leaderLeaseDurationSeconds: + default: 30 + description: |- + TTL of the cluster leader lock. "Think of it as the + length of time before initiation of the automatic failover process." + Changing this value causes PostgreSQL to restart. + format: int32 + minimum: 3 + type: integer + port: + default: 8008 + description: |- + The port on which Patroni should listen. + Changing this value causes PostgreSQL to restart. + format: int32 + minimum: 1024 + type: integer + switchover: + description: Switchover gives options to perform ad hoc switchovers + in a PostgresCluster. + properties: + enabled: + description: Whether or not the operator should allow switchovers + in a PostgresCluster + type: boolean + targetInstance: + description: |- + The instance that should become primary during a switchover. This field is + optional when Type is "Switchover" and required when Type is "Failover". + When it is not specified, a healthy replica is automatically selected. + type: string + type: + default: Switchover + description: |- + Type of switchover to perform. Valid options are Switchover and Failover. + "Switchover" changes the primary instance of a healthy PostgresCluster. + "Failover" forces a particular instance to be primary, regardless of other + factors. A TargetInstance must be specified to failover. + NOTE: The Failover type is reserved as the "last resort" case. + enum: + - Switchover + - Failover + type: string + required: + - enabled + type: object + syncPeriodSeconds: + default: 10 + description: |- + The interval for refreshing the leader lock and applying + dynamicConfiguration. Must be less than leaderLeaseDurationSeconds. + Changing this value causes PostgreSQL to restart. + format: int32 + minimum: 1 + type: integer + type: object + paused: + description: |- + Suspends the rollout and reconciliation of changes made to the + PostgresCluster spec. + type: boolean + port: + default: 5432 + description: The port on which PostgreSQL should listen. + format: int32 + minimum: 1024 + type: integer + postGISVersion: + description: |- + The PostGIS extension version installed in the PostgreSQL image. + When image is not set, indicates a PostGIS enabled image will be used. + type: string + postgresVersion: + description: The major version of PostgreSQL installed in the PostgreSQL + image + maximum: 17 + minimum: 11 + type: integer + proxy: + description: The specification of a proxy that connects to PostgreSQL. + properties: + pgBouncer: + description: Defines a PgBouncer proxy and connection pooler. + properties: + affinity: + description: |- + Scheduling constraints of a PgBouncer pod. Changing this value causes + PgBouncer to restart. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + config: + description: |- + Configuration settings for the PgBouncer process. Changes to any of these + values will be automatically reloaded without validation. Be careful, as + you may put PgBouncer into an unusable state. + More info: https://www.pgbouncer.org/usage.html#reload + properties: + databases: + additionalProperties: + type: string + description: |- + PgBouncer database definitions. The key is the database requested by a + client while the value is a libpq-styled connection string. The special + key "*" acts as a fallback. When this field is empty, PgBouncer is + configured with a single "*" entry that connects to the primary + PostgreSQL instance. + More info: https://www.pgbouncer.org/config.html#section-databases + type: object + files: + description: |- + Files to mount under "/etc/pgbouncer". When specified, settings in the + "pgbouncer.ini" file are loaded before all others. From there, other + files may be included by absolute path. Changing these references causes + PgBouncer to restart, but changes to the file contents are automatically + reloaded. + More info: https://www.pgbouncer.org/config.html#include-directive + items: + description: Projection that may be projected along + with other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. + Must not be absolute or contain the + ''..'' path. Must be utf-8 encoded. + The first item of the relative path + must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. 
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + global: + additionalProperties: + type: string + description: |- + Settings that apply to the entire PgBouncer process. + More info: https://www.pgbouncer.org/config.html + type: object + users: + additionalProperties: + type: string + description: |- + Connection settings specific to particular users. + More info: https://www.pgbouncer.org/config.html#section-users + type: object + type: object + containers: + description: |- + Custom sidecars for a PgBouncer pod. Changing this value causes + PgBouncer to restart. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. 
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + description: ContainerPort represents a network port + in a single container. properties: - key: - description: key is the label key that the selector - applies to. + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. 
Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array required: - - key - - operator + - containerPort type: object type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not - included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the - PersistentVolume backing this claim. - type: string - type: object - required: - - dataVolumeClaimSpec - type: object - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - metadata: - description: Metadata contains metadata for PostgresCluster resources - properties: - annotations: - additionalProperties: - type: string - type: object - labels: - additionalProperties: - type: string - type: object - type: object - monitoring: - description: The specification of monitoring tools that connect to - PostgreSQL - properties: - pgmonitor: - description: PGMonitorSpec defines the desired state of the pgMonitor - tool suite - properties: - exporter: - properties: - configuration: - description: 'Projected volumes containing custom PostgreSQL - Exporter configuration. Currently supports the customization - of PostgreSQL Exporter queries. If a "queries.yaml" - file is detected in any volume projected using this - field, it will be loaded using the "extend.query-path" - flag: https://github.com/prometheus-community/postgres_exporter#flags - Changing the values of field causes PostgreSQL and the - exporter to restart.' - items: - description: Projection that may be projected along - with other supported volume types + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: - configMap: - description: information about the configMap data - to project + exec: + description: Exec specifies the action to take. properties: - items: - description: If unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the ConfigMap, the - volume setup will error unless it is marked - optional. Paths must be relative and may not - contain the '..' 
path or start with '..'. + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: - description: Maps a string key to a path within - a volume. + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: - key: - description: The key to project. + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - mode: - description: 'Optional: mode bits used - to set permissions on this file. Must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' - format: int32 - type: integer - path: - description: The relative path of the - file to map the key to. May not be an - absolute path. May not contain the path - element '..'. May not start with the - string '..'. + value: + description: The header field value type: string required: - - key - - path + - name + - value type: object type: array - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. type: string - optional: - description: Specify whether the ConfigMap or - its keys must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port type: object - downwardAPI: - description: information about the downwardAPI data - to project + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. properties: - items: - description: Items is a list of DownwardAPIVolume - file + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. 
It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. 
+ Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities items: - description: DownwardAPIVolumeFile represents - information to create the file containing - the pod field - properties: - fieldRef: - description: 'Required: Selects a field - of the pod: only annotations, labels, - name and namespace are supported.' - properties: - apiVersion: - description: Version of the schema - the FieldPath is written in terms - of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to - select in the specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits used - to set permissions on this file, must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. - Must not be absolute or contain the - ''..'' path. Must be utf-8 encoded. - The first item of the relative path - must not start with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and - requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are - currently supported.' - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output - format of the exposed resources, - defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to - select' - type: string - required: - - resource - type: object - required: - - path - type: object + description: Capability represent POSIX capabilities + type + type: string type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic type: object - secret: - description: information about the secret data to - project + privileged: + description: |- + Run container in privileged mode. 
+ Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - items: - description: If unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the Secret, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain - the '..' path or start with '..'. + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits used - to set permissions on this file. Must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' - format: int32 - type: integer - path: - description: The relative path of the - file to map the key to. May not be an - absolute path. May not contain the path - element '..'. May not start with the - string '..'. + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value type: string required: - - key - - path + - name + - value type: object type: array - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port type: object - serviceAccountToken: - description: information about the serviceAccountToken - data to project + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. properties: - audience: - description: Audience is the intended audience - of the token. A recipient of a token must - identify itself with an identifier specified - in the audience of the token, and otherwise - should reject the token. The audience defaults - to the identifier of the apiserver. - type: string - expirationSeconds: - description: ExpirationSeconds is the requested - duration of validity of the service account - token. As the token approaches expiration, - the kubelet volume plugin will proactively - rotate the service account token. The kubelet - will start trying to rotate the token if the - token is older than 80 percent of its time - to live or if the token is older than 24 hours.Defaults - to 1 hour and must be at least 10 minutes. - format: int64 - type: integer - path: - description: Path is the path relative to the - mount point of the file to project the token - into. + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - path + - port type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. 
If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + customTLSSecret: + description: |- + A secret projection containing a certificate and key with which to encrypt + connections to PgBouncer. The "tls.crt", "tls.key", and "ca.crt" paths must + be PEM-encoded certificates and keys. Changing this value causes PgBouncer + to restart. + More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path type: object type: array - image: - description: The image name to use for crunchy-postgres-exporter - containers. The image may also be set using the RELATED_IMAGE_PGEXPORTER - environment variable. + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string - resources: - description: 'Changing this value causes PostgreSQL and - the exporter to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers' + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + image: + description: |- + Name of a container image that can run PgBouncer 1.15 or newer. Changing + this value causes PgBouncer to restart. The image may also be set using + the RELATED_IMAGE_PGBOUNCER environment variable. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + minAvailable: + anyOf: + - type: integer + - type: string + description: |- + Minimum number of pods that should be available at a time. + Defaults to one when the replicas field is greater than one. + x-kubernetes-int-or-string: true + port: + default: 5432 + description: |- + Port on which PgBouncer should listen for client connections. Changing + this value causes PgBouncer to restart. + format: int32 + minimum: 1024 + type: integer + priorityClassName: + description: |- + Priority class name for the pgBouncer pod. Changing this value causes + PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + type: string + replicas: + default: 1 + description: Number of desired PgBouncer pods. + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Compute resources of a PgBouncer container. Changing this value causes + PgBouncer to restart. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + service: + description: Specification of the service that exposes PgBouncer. + properties: + externalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + internalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + metadata: + description: Metadata contains metadata for custom resources properties: - limits: + annotations: additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: string type: object - requests: + labels: additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: string + type: object + type: object + nodePort: + description: |- + The port on which this service is exposed when type is NodePort or + LoadBalancer. Value must be in-range and not in use or the operation will + fail. If unspecified, a port will be allocated if this Service requires one. + - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + type: + default: ClusterIP + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + enum: + - ClusterIP + - NodePort + - LoadBalancer + type: string + type: object + sidecars: + description: Configuration for pgBouncer sidecar containers + properties: + pgbouncerConfig: + description: Defines the configuration for the pgBouncer + config sidecar container + properties: + resources: + description: Resource requirements for a sidecar container + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. 
+ items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object type: object type: object type: object + tolerations: + description: |- + Tolerations of a PgBouncer pod. Changing this value causes PgBouncer to + restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + Topology spread constraints of a PgBouncer pod. Changing this value causes + PgBouncer to restart. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. 
+ properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
+ format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. 
+ A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + required: + - pgBouncer + type: object + replicaService: + description: Specification of the service that exposes PostgreSQL + replica instances + properties: + externalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + internalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object type: object + nodePort: + description: |- + The port on which this service is exposed when type is NodePort or + LoadBalancer. Value must be in-range and not in use or the operation will + fail. If unspecified, a port will be allocated if this Service requires one. + - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + type: + default: ClusterIP + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + enum: + - ClusterIP + - NodePort + - LoadBalancer + type: string type: object - openshift: - description: Whether or not the PostgreSQL cluster is being deployed - to an OpenShift environment. If the field is unset, the operator - will automatically detect the environment. - type: boolean - patroni: + service: + description: Specification of the service that exposes the PostgreSQL + primary instance. properties: - dynamicConfiguration: + externalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + internalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object type: object - x-kubernetes-preserve-unknown-fields: true - leaderLeaseDurationSeconds: - default: 30 - description: TTL of the cluster leader lock. "Think of it as the - length of time before initiation of the automatic failover process." + nodePort: + description: |- + The port on which this service is exposed when type is NodePort or + LoadBalancer. Value must be in-range and not in use or the operation will + fail. 
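For orientation, the schema above defines the user-facing connection-pooling and lifecycle fields of a PostgresCluster. The following is a minimal sketch of a spec fragment that exercises them; the field names and constraints (a PgBouncer listen port of 1024 or higher defaulting to 5432, replicas defaulting to 1, service types limited to ClusterIP, NodePort, and LoadBalancer, a standby repoName matching ^repo[1-4]) come from the schema itself, while the concrete values, the Secret name, and the label selectors are placeholders chosen for illustration.

# Hypothetical PostgresCluster spec fragment; values are illustrative, not defaults.
spec:
  proxy:
    pgBouncer:
      port: 5432                      # must be >= 1024; 5432 is the schema default
      replicas: 2                     # schema default is 1, minimum 0
      minAvailable: 1                 # integer or string, per x-kubernetes-int-or-string
      customTLSSecret:
        name: hippo-pgbouncer-tls     # placeholder Secret holding tls.crt, tls.key, ca.crt
      resources:
        requests:
          cpu: 100m
          memory: 128Mi
      service:
        type: LoadBalancer            # one of ClusterIP (default), NodePort, LoadBalancer
      tolerations:
        - key: dedicated
          operator: Equal
          value: pgbouncer
          effect: NoSchedule
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: ScheduleAnyway
          labelSelector:
            matchLabels:
              example.com/app: pgbouncer   # placeholder label; substitute your own
  service:
    type: ClusterIP                   # primary service; NodePort and LoadBalancer are also allowed
  replicaService:
    type: ClusterIP
  shutdown: false                     # true scales workloads to zero and suspends CronJobs
  standby:
    enabled: false                    # when true, WAL is replayed from repoName or host/port
    repoName: repo1                   # used when enabled; must match ^repo[1-4]
  supplementalGroups:
    - 65534                           # group IDs must be between 1 and 2147483647

Per the descriptions above, changing customTLSSecret, image, port, or resources causes PgBouncer to restart, so these fields are best settled before exposing the pooler to clients.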
If unspecified, a port will be allocated if this Service requires one. + - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport format: int32 - minimum: 3 type: integer + type: + default: ClusterIP + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + enum: + - ClusterIP + - NodePort + - LoadBalancer + type: string + type: object + shutdown: + description: |- + Whether or not the PostgreSQL cluster should be stopped. + When this is true, workloads are scaled to zero and CronJobs + are suspended. + Other resources, such as Services and Volumes, remain in place. + type: boolean + standby: + description: Run this cluster as a read-only copy of an existing cluster + or archive. + properties: + enabled: + default: true + description: |- + Whether or not the PostgreSQL cluster should be read-only. When this is + true, WAL files are applied from a pgBackRest repository or another + PostgreSQL server. + type: boolean + host: + description: Network address of the PostgreSQL server to follow + via streaming replication. + type: string port: - default: 8008 - description: The port on which Patroni should listen. + description: Network port of the PostgreSQL server to follow via + streaming replication. format: int32 minimum: 1024 type: integer - syncPeriodSeconds: - default: 10 - description: The interval for refreshing the leader lock and applying - dynamicConfiguration. Must be less than leaderLeaseDurationSeconds. - format: int32 - minimum: 1 - type: integer + repoName: + description: The name of the pgBackRest repository to follow for + WAL files. + pattern: ^repo[1-4] + type: string type: object - port: - default: 5432 - description: The port on which PostgreSQL should listen. - format: int32 - minimum: 1024 - type: integer - postGISVersion: - description: The PostGIS extension version installed in the PostgreSQL - image. When image is not set, indicates a PostGIS enabled image - will be used. - type: string - postgresVersion: - description: The major version of PostgreSQL installed in the PostgreSQL - image - maximum: 13 - minimum: 10 - type: integer - proxy: - description: The specification of a proxy that connects to PostgreSQL. + supplementalGroups: + description: |- + A list of group IDs applied to the process of a container. These can be + useful when accessing shared file systems with constrained permissions. + More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context + items: + format: int64 + maximum: 2147483647 + minimum: 1 + type: integer + type: array + userInterface: + description: The specification of a user interface that connects to + PostgreSQL. properties: - pgBouncer: - description: Defines a PgBouncer proxy and connection pooler. + pgAdmin: + description: Defines a pgAdmin user interface. properties: affinity: - description: 'Scheduling constraints of a PgBouncer pod. Changing - this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of a pgAdmin pod. Changing this value causes + pgAdmin to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. 
properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 (i.e. - it's a no-op). A null preferred scheduling term - matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -4483,79 +15039,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -4567,105 +15116,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to an update), the system may or may not try - to eventually evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -4673,19 +15217,16 @@ spec: other pod(s)). 
properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -4696,18 +15237,101 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -4715,67 +15339,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. 
A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies which - namespaces the labelSelector applies to - (matches against); null or empty list - means "this pod's namespace" + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -4783,100 +15401,179 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to a pod label update), the system may or may - not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules @@ -4884,19 +15581,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to - the sum if the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -4907,18 +15601,101 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. 
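[Editor's illustration, not part of the patch: a minimal sketch of how the podAffinity term schema above is typically populated. The field path (pgAdmin's affinity under the PostgresCluster spec) and the label key are assumptions inferred from surrounding context, not taken from this diff.]

affinity:
  podAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
    - topologyKey: topology.kubernetes.io/zone        # required by the schema
      labelSelector:
        matchLabels:
          postgres-operator.crunchydata.com/cluster: hippo   # illustrative label
      namespaceSelector: {}    # empty selector matches all namespaces, per the description above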
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -4926,67 +15703,61 @@ spec: to. 
type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies which - namespaces the labelSelector applies to - (matches against); null or empty list - means "this pod's namespace" + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -4994,193 +15765,349 @@ spec: - weight type: object type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. 
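[Editor's illustration, not part of the patch: a weighted (preferred) pod anti-affinity term matching the WeightedPodAffinityTerm schema above. Per the schema, weight must be in the range 1-100 and topologyKey is required; the label key and value shown are assumptions.]

affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 100                                     # 1-100, per the schema
      podAffinityTerm:
        topologyKey: kubernetes.io/hostname
        labelSelector:
          matchExpressions:
          - key: postgres-operator.crunchydata.com/role   # illustrative key
            operator: In
            values: [pgadmin]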
If the - anti-affinity requirements specified by this field - cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may - or may not try to eventually evict the pod from - its node. When there are multiple elements, the - lists of nodes corresponding to each podAffinityTerm - are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + config: + description: |- + Configuration settings for the pgAdmin process. Changes to any of these + values will be loaded without validation. Be careful, as + you may put pgAdmin into an unusable state. + properties: + files: + description: |- + Files allows the user to mount projected volumes into the pgAdmin + container so that files can be referenced by pgAdmin as needed. + items: + description: Projection that may be projected along + with other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. - Empty topologyKey is not allowed. + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. type: string required: - - topologyKey + - path type: object - type: array - type: object - type: object - config: - description: 'Configuration settings for the PgBouncer process. - Changes to any of these values will be automatically reloaded - without validation. Be careful, as you may put PgBouncer - into an unusable state. More info: https://www.pgbouncer.org/usage.html#reload' - properties: - databases: - additionalProperties: - type: string - description: 'PgBouncer database definitions. The key - is the database requested by a client while the value - is a libpq-styled connection string. The special key - "*" acts as a fallback. When this field is empty, PgBouncer - is configured with a single "*" entry that connects - to the primary PostgreSQL instance. More info: https://www.pgbouncer.org/config.html#section-databases' - type: object - files: - description: 'Files to mount under "/etc/pgbouncer". When - specified, settings in the "pgbouncer.ini" file are - loaded before all others. From there, other files may - be included by absolute path. Changing these references - causes PgBouncer to restart, but changes to the file - contents are automatically reloaded. 
More info: https://www.pgbouncer.org/config.html#include-directive' - items: - description: Projection that may be projected along - with other supported volume types - properties: configMap: - description: information about the configMap data - to project + description: configMap information about the configMap + data to project properties: items: - description: If unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the ConfigMap, the - volume setup will error unless it is marked - optional. Paths must be relative and may not - contain the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. properties: key: - description: The key to project. + description: key is the key to project. type: string mode: - description: 'Optional: mode bits used - to set permissions on this file. Must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: The relative path of the - file to map the key to. May not be an - absolute path. May not contain the path - element '..'. May not start with the - string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: - description: Specify whether the ConfigMap or - its keys must be defined + description: optional specify whether the ConfigMap + or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: - description: information about the downwardAPI data - to project + description: downwardAPI information about the downwardAPI + data to project properties: items: description: Items is a list of DownwardAPIVolume @@ -5193,7 +16120,7 @@ spec: fieldRef: description: 'Required: Selects a field of the pod: only annotations, labels, - name and namespace are supported.' + name, namespace and uid are supported.' properties: apiVersion: description: Version of the schema @@ -5207,19 +16134,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used - to set permissions on this file, must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -5231,11 +16154,9 @@ spec: must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and - requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are - currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: description: 'Container name: required @@ -5257,183 +16178,344 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: - description: information about the secret data to - project + description: secret information about the secret + data to project properties: items: - description: If unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the Secret, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. 
If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. properties: key: - description: The key to project. + description: key is the key to project. type: string mode: - description: 'Optional: mode bits used - to set permissions on this file. Must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: The relative path of the - file to map the key to. May not be an - absolute path. May not contain the path - element '..'. May not start with the - string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: - description: Specify whether the Secret or its - key must be defined + description: optional field specify whether + the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: - description: information about the serviceAccountToken - data to project + description: serviceAccountToken is information + about the serviceAccountToken data to project properties: audience: - description: Audience is the intended audience - of the token. A recipient of a token must - identify itself with an identifier specified - in the audience of the token, and otherwise - should reject the token. The audience defaults - to the identifier of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: ExpirationSeconds is the requested - duration of validity of the service account - token. As the token approaches expiration, - the kubelet volume plugin will proactively - rotate the service account token. 
The kubelet - will start trying to rotate the token if the - token is older than 80 percent of its time - to live or if the token is older than 24 hours.Defaults - to 1 hour and must be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: Path is the path relative to the - mount point of the file to project the token - into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path type: object type: object type: array - global: - additionalProperties: - type: string - description: 'Settings that apply to the entire PgBouncer - process. More info: https://www.pgbouncer.org/config.html' + ldapBindPassword: + description: |- + A Secret containing the value for the LDAP_BIND_PASSWORD setting. + More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key type: object - users: - additionalProperties: - type: string - description: 'Connection settings specific to particular - users. More info: https://www.pgbouncer.org/config.html#section-users' + x-kubernetes-map-type: atomic + settings: + description: |- + Settings for the pgAdmin server process. Keys should be uppercase and + values must be constants. + More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html type: object + x-kubernetes-preserve-unknown-fields: true type: object - customTLSSecret: - description: 'A secret projection containing a certificate - and key with which to encrypt connections to PgBouncer. - The "tls.crt", "tls.key", and "ca.crt" paths must be PEM-encoded - certificates and keys. Changing this value causes PgBouncer - to restart. More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths' + dataVolumeClaimSpec: + description: |- + Defines a PersistentVolumeClaim for pgAdmin data. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes properties: - items: - description: If unspecified, each key-value pair in the - Data field of the referenced Secret will be projected - into the volume as a file whose name is the key and - content is the value. If specified, the listed keys - will be projected into the specified paths, and unlisted - keys will not be present. If a key is specified which - is not present in the Secret, the volume setup will - error unless it is marked optional. Paths must be relative - and may not contain the '..' path or start with '..'. 
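[Editor's illustration, not part of the patch: a sketch of the pgAdmin config block whose schema closes above. The Secret and ConfigMap names are placeholders, and settings keys must be uppercase pgAdmin constants per the linked config_py documentation.]

config:
  settings:
    SHOW_GRAVATAR_IMAGE: False          # example pgAdmin constant; verify against config_py
  ldapBindPassword:
    name: pgadmin-ldap                  # placeholder Secret name
    key: bind-password                  # key holding the LDAP_BIND_PASSWORD value
  files:
  - configMap:
      name: pgadmin-extra               # placeholder ConfigMap
      items:
      - key: custom.json
        path: custom.json
  - secret:
      name: pgadmin-certs               # placeholder Secret
      optional: true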
+ accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: - description: Maps a string key to a path within a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits used to set permissions - on this file. Must be an octal value between 0000 - and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON - requires decimal values for mode bits. If not - specified, the volume defaultMode will be used. - This might be in conflict with other options that - affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map - the key to. May not be an absolute path. May not - contain the path element '..'. May not start with - the string '..'. - type: string - required: - - key - - path - type: object + type: string type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. 
+ There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean type: object image: - description: 'Name of a container image that can run PgBouncer - 1.15 or newer. Changing this value causes PgBouncer to restart. - The image may also be set using the RELATED_IMAGE_PGBOUNCER - environment variable. More info: https://kubernetes.io/docs/concepts/containers/images' + description: |- + Name of a container image that can run pgAdmin 4. Changing this value causes + pgAdmin to restart. The image may also be set using the RELATED_IMAGE_PGADMIN + environment variable. 
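[Editor's illustration, not part of the patch: a minimal dataVolumeClaimSpec matching the PersistentVolumeClaim schema that closes above. The storage class and size are placeholders.]

dataVolumeClaimSpec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: standard            # placeholder; omit to use the cluster default class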
+ More info: https://kubernetes.io/docs/concepts/containers/images type: string metadata: - description: Metadata contains metadata for PostgresCluster - resources + description: Metadata contains metadata for custom resources properties: annotations: additionalProperties: @@ -5444,24 +16526,50 @@ spec: type: string type: object type: object - port: - default: 5432 - description: Port on which PgBouncer should listen for client - connections. Changing this value causes PgBouncer to restart. - format: int32 - minimum: 1024 - type: integer + priorityClassName: + description: |- + Priority class name for the pgAdmin pod. Changing this value causes pgAdmin + to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + type: string replicas: default: 1 - description: Number of desired PgBouncer pods. + description: Number of desired pgAdmin pods. format: int32 + maximum: 1 minimum: 0 type: integer resources: - description: 'Compute resources of a PgBouncer container. - Changing this value causes PgBouncer to restart. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers' + description: |- + Compute resources of a pgAdmin container. Changing this value causes + pgAdmin to restart. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -5469,8 +16577,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -5479,161 +16588,352 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
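[Editor's illustration, not part of the patch: the scheduling and sizing fields described in this hunk. Note the schema caps pgAdmin replicas at 1; the PriorityClass name and resource values are placeholders.]

priorityClassName: high-priority        # placeholder PriorityClass
replicas: 1                             # schema allows 0 or 1
resources:
  requests:
    cpu: 100m
    memory: 256Mi
  limits:
    memory: 512Mi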
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object service: - description: Specification of the service that exposes PgBouncer. + description: Specification of the service that exposes pgAdmin. properties: + externalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + internalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + nodePort: + description: |- + The port on which this service is exposed when type is NodePort or + LoadBalancer. Value must be in-range and not in use or the operation will + fail. If unspecified, a port will be allocated if this Service requires one. + - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer type: + default: ClusterIP description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' enum: - ClusterIP - NodePort - LoadBalancer type: string - required: - - type type: object tolerations: - description: 'Tolerations of a PgBouncer pod. Changing this - value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of a pgAdmin pod. Changing this value causes pgAdmin to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. 
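[Editor's illustration, not part of the patch: a service stanza using the fields added above. The type defaults to ClusterIP; nodePort only applies to NodePort and LoadBalancer services and must be an unused in-range port. The annotation is illustrative.]

service:
  type: NodePort
  nodePort: 32000                        # placeholder port
  externalTrafficPolicy: Cluster
  metadata:
    annotations:
      example.com/owner: dba-team        # illustrative annotation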
type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value should - be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + Topology spread constraints of a pgAdmin pod. Changing this value causes + pgAdmin to restart. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. 
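[Editor's illustration, not part of the patch: tolerations matching the schema whose fields close above. The dedicated taint key and value are placeholders; the second entry uses the standard node.kubernetes.io/unreachable taint.]

tolerations:
- key: dedicated                         # placeholder taint key
  operator: Equal
  value: pgadmin
  effect: NoSchedule
- key: node.kubernetes.io/unreachable
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 300                 # tolerate for 5 minutes instead of forever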
+ MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. 
Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable type: object type: array + required: + - dataVolumeClaimSpec type: object required: - - pgBouncer - type: object - service: - description: Specification of the service that exposes the PostgreSQL - primary instance. - properties: - type: - description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' - enum: - - ClusterIP - - NodePort - - LoadBalancer - type: string - required: - - type - type: object - shutdown: - description: Whether or not the PostgreSQL cluster should be stopped. - When this is true, workloads are scaled to zero and CronJobs are - suspended. Other resources, such as Services and Volumes, remain - in place. - type: boolean - standby: - description: Run this cluster as a read-only copy of an existing cluster - or archive. - properties: - enabled: - default: true - description: Whether or not the PostgreSQL cluster should be read-only. - When this is true, WAL files are applied from the pgBackRest - repository. - type: boolean - repoName: - description: The name of the pgBackRest repository to follow for - WAL files. - pattern: ^repo[1-4] - type: string - required: - - repoName + - pgAdmin type: object - supplementalGroups: - description: 'A list of group IDs applied to the process of a container. - These can be useful when accessing shared file systems with constrained - permissions. 
More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context' - items: - format: int64 - type: integer - type: array users: - description: Users to create inside PostgreSQL and the databases they - should access. The default creates one user that can access one - database matching the PostgresCluster name. An empty list creates - no users. Removing a user from this list does NOT drop the user - nor revoke their access. + description: |- + Users to create inside PostgreSQL and the databases they should access. + The default creates one user that can access one database matching the + PostgresCluster name. An empty list creates no users. Removing a user + from this list does NOT drop the user nor revoke their access. items: properties: databases: - description: Databases to which this user can connect and create - objects. Removing a database from this list does NOT revoke - access. This field is ignored for the "postgres" user. + description: |- + Databases to which this user can connect and create objects. Removing a + database from this list does NOT revoke access. This field is ignored for + the "postgres" user. items: - description: 'PostgreSQL identifiers are limited in length - but may contain any character. More info: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS' + description: |- + PostgreSQL identifiers are limited in length but may contain any character. + More info: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS maxLength: 63 minLength: 1 type: string type: array x-kubernetes-list-type: set name: - description: The name of this PostgreSQL user. The value may - contain only lowercase letters, numbers, and hyphen so that - it fits into Kubernetes metadata. + description: |- + The name of this PostgreSQL user. The value may contain only lowercase + letters, numbers, and hyphen so that it fits into Kubernetes metadata. maxLength: 63 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ type: string options: - description: 'ALTER ROLE options except for PASSWORD. This field - is ignored for the "postgres" user. More info: https://www.postgresql.org/docs/current/role-attributes.html' + description: |- + ALTER ROLE options except for PASSWORD. This field is ignored for the + "postgres" user. + More info: https://www.postgresql.org/docs/current/role-attributes.html + maxLength: 200 pattern: ^[^;]*$ type: string + x-kubernetes-validations: + - message: cannot assign password + rule: '!self.matches("(?i:PASSWORD)")' + - message: cannot contain comments + rule: '!self.matches("(?:--|/[*]|[*]/)")' + password: + description: Properties of the password generated for this user. + properties: + type: + default: ASCII + description: |- + Type of password to generate. Defaults to ASCII. Valid options are ASCII + and AlphaNumeric. + "ASCII" passwords contain letters, numbers, and symbols from the US-ASCII character set. + "AlphaNumeric" passwords contain letters and numbers from the US-ASCII character set. 
+ enum: + - ASCII + - AlphaNumeric + type: string + required: + - type + type: object required: - name type: object + maxItems: 64 type: array x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map required: - - backups - instances - postgresVersion type: object @@ -5641,48 +16941,40 @@ spec: description: PostgresClusterStatus defines the observed state of PostgresCluster properties: conditions: - description: 'conditions represent the observations of postgrescluster''s - current state. Known .status.conditions.type are: "PersistentVolumeResizing", - "ProxyAvailable"' + description: |- + conditions represent the observations of postgrescluster's current state. + Known .status.conditions.type are: "PersistentVolumeResizing", + "Progressing", "ProxyAvailable" items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -5697,10 +16989,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -5715,6 +17003,10 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + databaseInitSQL: + description: DatabaseInitSQL state of custom database initialization + in the cluster + type: string databaseRevision: description: Identifies the databases that have been installed into PostgreSQL. @@ -5723,6 +17015,11 @@ spec: description: Current state of PostgreSQL instances. items: properties: + desiredPGDataVolume: + additionalProperties: + type: string + description: Desired Size of the pgData volume + type: object name: type: string readyReplicas: @@ -5730,12 +17027,11 @@ spec: format: int32 type: integer replicas: - description: Total number of non-terminated pods. + description: Total number of pods. format: int32 type: integer updatedReplicas: - description: Total number of non-terminated pods that have the - desired specification. + description: Total number of pods that have the desired specification. format: int32 type: integer required: @@ -5759,6 +17055,13 @@ spec: type: integer patroni: properties: + switchover: + description: Tracks the execution of the switchover requests. + type: string + switchoverTimeline: + description: Tracks the current timeline during switchovers + format: int64 + type: integer systemIdentifier: description: The PostgreSQL system identifier reported by Patroni. type: string @@ -5775,11 +17078,10 @@ spec: format: int32 type: integer completionTime: - description: Represents the time the manual backup Job was - determined by the Job controller to be completed. This - field is only set if the backup completed successfully. - Additionally, it is represented in RFC3339 form and is in - UTC. + description: |- + Represents the time the manual backup Job was determined by the Job controller + to be completed. This field is only set if the backup completed successfully. + Additionally, it is represented in RFC3339 form and is in UTC. format: date-time type: string failed: @@ -5788,18 +17090,19 @@ spec: format: int32 type: integer finished: - description: Specifies whether or not the Job is finished - executing (does not indicate success or failure). + description: |- + Specifies whether or not the Job is finished executing (does not indicate success or + failure). type: boolean id: - description: A unique identifier for the manual backup as - provided using the "pgbackrest-backup" annotation when initiating - a backup. + description: |- + A unique identifier for the manual backup as provided using the "pgbackrest-backup" + annotation when initiating a backup. type: string startTime: - description: Represents the time the manual backup Job was - acknowledged by the Job controller. It is represented in - RFC3339 form and is in UTC. 
+ description: |- + Represents the time the manual backup Job was acknowledged by the Job controller. + It is represented in RFC3339 form and is in UTC. format: date-time type: string succeeded: @@ -5816,16 +17119,19 @@ spec: host properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string ready: description: Whether or not the pgBackRest repository host @@ -5845,14 +17151,14 @@ spec: description: The name of the pgBackRest repository type: string replicaCreateBackupComplete: - description: ReplicaCreateBackupReady indicates whether - a backup exists in the repository as needed to bootstrap - replicas. + description: |- + ReplicaCreateBackupReady indicates whether a backup exists in the repository as needed + to bootstrap replicas. type: boolean repoOptionsHash: - description: A hash of the required fields in the spec for - defining an Azure, GCS or S3 repository, Utilizd to detect - changes to these fields and then execute pgBackRest stanza-create + description: |- + A hash of the required fields in the spec for defining an Azure, GCS or S3 repository, + Utilized to detect changes to these fields and then execute pgBackRest stanza-create commands accordingly. type: string stanzaCreated: @@ -5879,11 +17185,10 @@ spec: format: int32 type: integer completionTime: - description: Represents the time the manual backup Job was - determined by the Job controller to be completed. This - field is only set if the backup completed successfully. - Additionally, it is represented in RFC3339 form and is in - UTC. + description: |- + Represents the time the manual backup Job was determined by the Job controller + to be completed. This field is only set if the backup completed successfully. + Additionally, it is represented in RFC3339 form and is in UTC. format: date-time type: string failed: @@ -5892,18 +17197,19 @@ spec: format: int32 type: integer finished: - description: Specifies whether or not the Job is finished - executing (does not indicate success or failure). + description: |- + Specifies whether or not the Job is finished executing (does not indicate success or + failure). type: boolean id: - description: A unique identifier for the manual backup as - provided using the "pgbackrest-backup" annotation when initiating - a backup. 
+ description: |- + A unique identifier for the manual backup as provided using the "pgbackrest-backup" + annotation when initiating a backup. type: string startTime: - description: Represents the time the manual backup Job was - acknowledged by the Job controller. It is represented in - RFC3339 form and is in UTC. + description: |- + Represents the time the manual backup Job was acknowledged by the Job controller. + It is represented in RFC3339 form and is in UTC. format: date-time type: string succeeded: @@ -5925,11 +17231,10 @@ spec: format: int32 type: integer completionTime: - description: Represents the time the manual backup Job was - determined by the Job controller to be completed. This - field is only set if the backup completed successfully. - Additionally, it is represented in RFC3339 form and is - in UTC. + description: |- + Represents the time the manual backup Job was determined by the Job controller + to be completed. This field is only set if the backup completed successfully. + Additionally, it is represented in RFC3339 form and is in UTC. format: date-time type: string cronJobName: @@ -5945,9 +17250,9 @@ spec: description: The name of the associated pgBackRest repository type: string startTime: - description: Represents the time the manual backup Job was - acknowledged by the Job controller. It is represented - in RFC3339 form and is in UTC. + description: |- + Represents the time the manual backup Job was acknowledged by the Job controller. + It is represented in RFC3339 form and is in UTC. format: date-time type: string succeeded: @@ -5961,14 +17266,20 @@ spec: type: object type: array type: object + postgresVersion: + description: |- + Stores the current PostgreSQL major version following a successful + major PostgreSQL upgrade. + type: integer proxy: description: Current state of the PostgreSQL proxy. properties: pgBouncer: properties: postgresRevision: - description: Identifies the revision of PgBouncer assets that - have been installed into PostgreSQL. + description: |- + Identifies the revision of PgBouncer assets that have been installed into + PostgreSQL. type: string readyReplicas: description: Total number of ready pods. @@ -5980,13 +17291,33 @@ spec: type: integer type: object type: object + registrationRequired: + properties: + pgoVersion: + type: string + type: object startupInstance: - description: The instance that should be started first when bootstrapping - and/or starting a PostgresCluster. + description: |- + The instance that should be started first when bootstrapping and/or starting a + PostgresCluster. type: string startupInstanceSet: description: The instance set associated with the startupInstance type: string + tokenRequired: + type: string + userInterface: + description: Current state of the PostgreSQL user interface. + properties: + pgAdmin: + description: The state of the pgAdmin user interface. + properties: + usersRevision: + description: Hash that indicates which users have been installed + into pgAdmin. + type: string + type: object + type: object usersRevision: description: Identifies the users that have been installed into PostgreSQL. 
type: string @@ -5996,9 +17327,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 127f80fecd..85b7cbdf29 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -1,13 +1,17 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: +- bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml - bases/postgres-operator.crunchydata.com_postgresclusters.yaml +- bases/postgres-operator.crunchydata.com_pgupgrades.yaml +- bases/postgres-operator.crunchydata.com_pgadmins.yaml -patchesJson6902: +patches: - target: - group: apiextensions.k8s.io - version: v1 kind: CustomResourceDefinition - name: postgresclusters.postgres-operator.crunchydata.com - path: validation.yaml + patch: |- + - op: add + path: /metadata/labels + value: + app.kubernetes.io/name: pgo + app.kubernetes.io/version: latest diff --git a/config/crd/validation.yaml b/config/crd/validation.yaml deleted file mode 100644 index 864bf12b47..0000000000 --- a/config/crd/validation.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# PostgresCluster "v1beta1" is in "/spec/versions/0" - -# Containers should not run with a root GID. -# - https://kubernetes.io/docs/concepts/security/pod-security-standards/ -- op: add - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/supplementalGroups/items/minimum - value: 1 - -# Supplementary GIDs must fit within int32. -# - https://releases.k8s.io/v1.18.0/pkg/apis/core/validation/validation.go#L3659-L3663 -# - https://releases.k8s.io/v1.22.0/pkg/apis/core/validation/validation.go#L3923-L3927 -- op: add - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/supplementalGroups/items/maximum - value: 2147483647 # math.MaxInt32 diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index c76feef7da..7001380693 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -1,14 +1,20 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + namespace: postgres-operator -commonLabels: - postgres-operator.crunchydata.com/control-plane: postgres-operator +labels: +- includeSelectors: true + pairs: + # Note: this label differs from the label set in postgres-operator-examples + postgres-operator.crunchydata.com/control-plane: postgres-operator -bases: +resources: - ../crd -- ../rbac/cluster +- ../rbac - ../manager images: - name: postgres-operator newName: registry.developers.crunchydata.com/crunchydata/postgres-operator - newTag: ubi8-5.0.2-0 + newTag: latest diff --git a/config/dev/kustomization.yaml b/config/dev/kustomization.yaml index 1d13941a42..2794e5fb69 100644 --- a/config/dev/kustomization.yaml +++ b/config/dev/kustomization.yaml @@ -1,8 +1,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -bases: +resources: - ../default patches: -- manager-dev.yaml +- path: manager-dev.yaml diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 5c5f0b84cb..dfce22e6c5 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,2 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + resources: - manager.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index b1ddb43226..2eb849e138 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -5,26 +5,48 @@ 
metadata: name: pgo spec: replicas: 1 + strategy: { type: Recreate } template: spec: containers: - name: operator image: postgres-operator env: + - name: PGO_INSTALLER + value: kustomize + - name: PGO_INSTALLER_ORIGIN + value: postgres-operator-repo + - name: PGO_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace - name: CRUNCHY_DEBUG value: "true" - - name: RELATED_IMAGE_POSTGRES_13 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0" - - name: RELATED_IMAGE_POSTGRES_13_GIS_3.1 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis-ha:centos8-13.4-3.1-0" + - name: RELATED_IMAGE_POSTGRES_16 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2" + - name: RELATED_IMAGE_POSTGRES_16_GIS_3.3 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.3-2" + - name: RELATED_IMAGE_POSTGRES_16_GIS_3.4 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.4-2" + - name: RELATED_IMAGE_POSTGRES_17 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-17.0-0" + - name: RELATED_IMAGE_POSTGRES_17_GIS_3.4 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-17.0-3.4-0" + - name: RELATED_IMAGE_PGADMIN + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-31" - name: RELATED_IMAGE_PGBACKREST - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.53.1-0" - name: RELATED_IMAGE_PGBOUNCER - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:centos8-1.15-2" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.23-0" - name: RELATED_IMAGE_PGEXPORTER - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-5.0.2-0" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest" + - name: RELATED_IMAGE_PGUPGRADE + value: "registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest" + - name: RELATED_IMAGE_STANDALONE_PGADMIN + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-8.12-0" securityContext: allowPrivilegeEscalation: false + capabilities: { drop: [ALL] } readOnlyRootFilesystem: true runAsNonRoot: true serviceAccountName: pgo diff --git a/config/namespace/kustomization.yaml b/config/namespace/kustomization.yaml index bf20f4df68..e06cce134a 100644 --- a/config/namespace/kustomization.yaml +++ b/config/namespace/kustomization.yaml @@ -1,2 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + resources: - namespace.yaml diff --git a/config/rbac/.gitignore b/config/rbac/.gitignore deleted file mode 100644 index 2ad5901955..0000000000 --- a/config/rbac/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/role.yaml diff --git a/config/rbac/cluster/kustomization.yaml b/config/rbac/cluster/kustomization.yaml deleted file mode 100644 index 664fcac061..0000000000 --- a/config/rbac/cluster/kustomization.yaml +++ /dev/null @@ -1,4 +0,0 @@ -resources: -- service_account.yaml -- role.yaml -- role_binding.yaml diff --git a/installers/olm/config/redhat/kustomization.yaml b/config/rbac/kustomization.yaml similarity index 53% rename from installers/olm/config/redhat/kustomization.yaml rename to config/rbac/kustomization.yaml index ba0fce9a49..82cfb0841b 100644 --- 
a/installers/olm/config/redhat/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -2,8 +2,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: -- ../operator -- ../examples - -patches: -- path: related-images.yaml +- service_account.yaml +- role.yaml +- role_binding.yaml diff --git a/config/rbac/namespace/kustomization.yaml b/config/rbac/namespace/kustomization.yaml deleted file mode 100644 index 664fcac061..0000000000 --- a/config/rbac/namespace/kustomization.yaml +++ /dev/null @@ -1,4 +0,0 @@ -resources: -- service_account.yaml -- role.yaml -- role_binding.yaml diff --git a/config/rbac/namespace/role.yaml b/config/rbac/namespace/role.yaml deleted file mode 100644 index 1f5120764b..0000000000 --- a/config/rbac/namespace/role.yaml +++ /dev/null @@ -1,122 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: postgres-operator -rules: -- apiGroups: - - '' - resources: - - configmaps - - persistentvolumeclaims - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - '' - resources: - - endpoints - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - watch -- apiGroups: - - '' - resources: - - endpoints/restricted - - pods/exec - verbs: - - create -- apiGroups: - - '' - resources: - - events - verbs: - - create - - patch -- apiGroups: - - '' - resources: - - pods - verbs: - - delete - - get - - list - - patch - - watch -- apiGroups: - - '' - resources: - - serviceaccounts - verbs: - - create - - get - - list - - patch - - watch -- apiGroups: - - apps - resources: - - deployments - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - batch - resources: - - cronjobs - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - postgresclusters - verbs: - - get - - list - - patch - - watch -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - postgresclusters/finalizers - verbs: - - update -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - postgresclusters/status - verbs: - - patch -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - - roles - verbs: - - create - - get - - list - - patch - - watch diff --git a/config/rbac/namespace/role_binding.yaml b/config/rbac/namespace/role_binding.yaml deleted file mode 100644 index d7c16c8a5b..0000000000 --- a/config/rbac/namespace/role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: postgres-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: postgres-operator -subjects: -- kind: ServiceAccount - name: pgo diff --git a/config/rbac/namespace/service_account.yaml b/config/rbac/namespace/service_account.yaml deleted file mode 100644 index 364f797171..0000000000 --- a/config/rbac/namespace/service_account.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: pgo diff --git a/config/rbac/cluster/role.yaml b/config/rbac/role.yaml similarity index 63% rename from config/rbac/cluster/role.yaml rename to config/rbac/role.yaml index 9eafa0c550..d5783d00b1 100644 --- a/config/rbac/cluster/role.yaml +++ b/config/rbac/role.yaml @@ -5,11 +5,12 @@ metadata: name: postgres-operator rules: - apiGroups: - - '' + - "" resources: - configmaps - persistentvolumeclaims - secrets + - serviceaccounts - 
services verbs: - create @@ -19,7 +20,7 @@ rules: - patch - watch - apiGroups: - - '' + - "" resources: - endpoints verbs: @@ -31,21 +32,21 @@ rules: - patch - watch - apiGroups: - - '' + - "" resources: - endpoints/restricted - pods/exec verbs: - create - apiGroups: - - '' + - "" resources: - events verbs: - create - patch - apiGroups: - - '' + - "" resources: - pods verbs: @@ -55,20 +56,22 @@ rules: - patch - watch - apiGroups: - - '' + - apps resources: - - serviceaccounts + - deployments + - statefulsets verbs: - create + - delete - get - list - patch - watch - apiGroups: - - apps + - batch resources: - - deployments - - statefulsets + - cronjobs + - jobs verbs: - create - delete @@ -77,10 +80,18 @@ rules: - patch - watch - apiGroups: - - batch + - coordination.k8s.io resources: - - cronjobs - - jobs + - leases + verbs: + - create + - get + - update + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets verbs: - create - delete @@ -91,24 +102,55 @@ rules: - apiGroups: - postgres-operator.crunchydata.com resources: - - postgresclusters + - crunchybridgeclusters verbs: - get - list - patch + - update + - watch +- apiGroups: + - postgres-operator.crunchydata.com + resources: + - crunchybridgeclusters/finalizers + - crunchybridgeclusters/status + verbs: + - patch + - update +- apiGroups: + - postgres-operator.crunchydata.com + resources: + - pgadmins + - pgupgrades + verbs: + - get + - list - watch - apiGroups: - postgres-operator.crunchydata.com resources: + - pgadmins/finalizers + - pgupgrades/finalizers - postgresclusters/finalizers verbs: - update - apiGroups: - postgres-operator.crunchydata.com resources: + - pgadmins/status + - pgupgrades/status - postgresclusters/status verbs: - patch +- apiGroups: + - postgres-operator.crunchydata.com + resources: + - postgresclusters + verbs: + - get + - list + - patch + - watch - apiGroups: - rbac.authorization.k8s.io resources: @@ -116,6 +158,18 @@ rules: - roles verbs: - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - delete - get - list - patch diff --git a/config/rbac/cluster/role_binding.yaml b/config/rbac/role_binding.yaml similarity index 100% rename from config/rbac/cluster/role_binding.yaml rename to config/rbac/role_binding.yaml diff --git a/config/rbac/cluster/service_account.yaml b/config/rbac/service_account.yaml similarity index 100% rename from config/rbac/cluster/service_account.yaml rename to config/rbac/service_account.yaml diff --git a/config/singlenamespace/kustomization.yaml b/config/singlenamespace/kustomization.yaml deleted file mode 100644 index b203431367..0000000000 --- a/config/singlenamespace/kustomization.yaml +++ /dev/null @@ -1,17 +0,0 @@ -namespace: postgres-operator - -commonLabels: - postgres-operator.crunchydata.com/control-plane: postgres-operator - -bases: -- ../crd -- ../rbac/namespace -- ../manager - -patches: -- manager-target.yaml - -images: -- name: postgres-operator - newName: registry.developers.crunchydata.com/crunchydata/postgres-operator - newTag: ubi8-5.0.2-0 diff --git a/config/singlenamespace/manager-target.yaml b/config/singlenamespace/manager-target.yaml deleted file mode 100644 index f8597ec584..0000000000 --- a/config/singlenamespace/manager-target.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: postgres-operator -spec: - template: - spec: - containers: - - name: operator - env: - - name: PGO_TARGET_NAMESPACE - valueFrom: 
{ fieldRef: { apiVersion: v1, fieldPath: metadata.namespace } } diff --git a/docs/archetypes/default.md b/docs/archetypes/default.md deleted file mode 100644 index 00e77bd79b..0000000000 --- a/docs/archetypes/default.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "{{ replace .Name "-" " " | title }}" -date: {{ .Date }} -draft: true ---- - diff --git a/docs/config.toml b/docs/config.toml deleted file mode 100644 index a26b80efc3..0000000000 --- a/docs/config.toml +++ /dev/null @@ -1,69 +0,0 @@ -baseURL= "" - -languageCode = "en-us" -DefaultContentLanguage = "en" -title = "PGO, the Postgres Operator from Crunchy Data" -theme = "crunchy-hugo-theme" -pygmentsCodeFences = true -pygmentsStyle = "monokailight" -publishDir = "" -canonifyurls = true -relativeURLs = true - -defaultContentLanguage = "en" -defaultContentLanguageInSubdir= false -enableMissingTranslationPlaceholders = false - -[params] -editURL = "https://github.com/CrunchyData/postgres-operator/edit/master/docs/content/" -showVisitedLinks = false # default is false -themeStyle = "flex" # "original" or "flex" # default "flex" -themeVariant = "" # choose theme variant "green", "gold" , "gray", "blue" (default) -ordersectionsby = "weight" # ordersectionsby = "title" -disableHomeIcon = true # default is false -disableSearch = false # default is false -disableNavChevron = false # set true to hide next/prev chevron, default is false -highlightClientSide = false # set true to use highlight.pack.js instead of the default hugo chroma highlighter -menushortcutsnewtab = true # set true to open shortcuts links to a new tab/window -enableGitInfo = true -operatorVersion = "5.0.2" -centosBase = "centos8" - -[outputs] -home = [ "HTML", "RSS", "JSON"] - -[[menu.shortcuts]] -name = "" -url = "/" -weight = 1 - -[[menu.shortcuts]] -name = " " -url = "https://github.com/CrunchyData/postgres-operator" -weight = 10 - -[[menu.shortcuts]] -name = " " -identifier = "kubedoc" -url = "https://kubernetes.io/docs/" -weight = 20 - -[[menu.shortcuts]] -name = " " -url = "https://github.com/CrunchyData/postgres-operator/blob/master/LICENSE.md" -weight = 30 - -[[menu.downloads]] -name = " " -url = "/pdf/postgres_operator.pdf" -weight = 20 - -[[menu.downloads]] -name = " " -url = "/epub/postgres_operator.epub" -weight = 30 - -[markup] - [markup.goldmark] - [markup.goldmark.renderer] - unsafe = true diff --git a/docs/content/_index.md b/docs/content/_index.md deleted file mode 100644 index 678be6ee45..0000000000 --- a/docs/content/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: "PGO, the Postgres Operator from Crunchy Data" -date: -draft: false ---- - -# PGO, the Postgres Operator from Crunchy Data - - PGO: The Postgres Operator from Crunchy Data - -Latest Release: {{< param operatorVersion >}} - -# Production Postgres Made Easy - -[PGO](https://github.com/CrunchyData/postgres-operator), the [Postgres Operator]((https://github.com/CrunchyData/postgres-operator)) from [Crunchy Data](https://www.crunchydata.com), gives you a **declarative Postgres** solution that automatically manages your [PostgreSQL](https://www.postgresql.org) clusters. - -Designed for your GitOps workflows, it is [easy to get started]({{< relref "quickstart/_index.md" >}}) with Postgres on Kubernetes with PGO. Within a few moments, you can have a production grade Postgres cluster complete with high availability, disaster recovery, and monitoring, all over secure TLS communications.Even better, PGO lets you easily customize your Postgres cluster to tailor it to your workload! 
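A minimal sketch of the declarative workflow described above, assuming the v1beta1 `PostgresCluster` schema defined earlier in this diff, where only `instances` and `postgresVersion` are required; the cluster name `hippo`, the instance name, and the storage request are illustrative placeholders, not values taken from this changeset:

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo            # illustrative cluster name
spec:
  postgresVersion: 16    # matches a RELATED_IMAGE_POSTGRES_16 entry in this diff
  instances:
    - name: instance1
      dataVolumeClaimSpec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi # illustrative size
```

Such a manifest would typically be applied with `kubectl apply -f`, after which the operator reconciles the cluster toward the declared state.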
- -With conveniences like cloning Postgres clusters to using rolling updates to roll out disruptive changes with minimal downtime, PGO is ready to support your Postgres data at every stage of your release pipeline. Built for resiliency and uptime, PGO will keep your desired Postgres in a desired state so you do not need to worry about it. - -PGO is developed with many years of production experience in automating Postgres management on Kubernetes, providing a seamless cloud native Postgres solution to keep your data always available. - -## Supported Platforms - -PGO, the Postgres Operator from Crunchy Data, is tested on the following platforms: - -- Kubernetes 1.18+ -- OpenShift 4.5+ -- Google Kubernetes Engine (GKE), including Anthos -- Amazon EKS -- Microsoft AKS -- VMware Tanzu - -This list only includes the platforms that the Postgres Operator is specifically -tested on as part of the release process: PGO works on other Kubernetes -distributions as well, such as Rancher. - -The PGO Postgres Operator project source code is available subject to the [Apache 2.0 license](https://raw.githubusercontent.com/CrunchyData/postgres-operator/master/LICENSE.md) with the PGO logo and branding assets covered by [our trademark guidelines](/logos/TRADEMARKS.md). diff --git a/docs/content/architecture/_index.md b/docs/content/architecture/_index.md deleted file mode 100644 index 452f695c33..0000000000 --- a/docs/content/architecture/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "Architecture" -date: -draft: false -weight: 40 ---- diff --git a/docs/content/architecture/backups.md b/docs/content/architecture/backups.md deleted file mode 100644 index c9cea9c962..0000000000 --- a/docs/content/architecture/backups.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: "Backup Management" -date: -draft: false -weight: 120 ---- - -When using the PostgreSQL Operator, the answer to the question "do you take -backups of your database" is automatically "yes!" - -The PostgreSQL Operator uses the open source -[pgBackRest](https://pgbackrest.org) backup and restore utility that is designed -for working with databases that are many terabytes in size. As described in the -[Provisioning](/architecture/provisioning/) section, pgBackRest is enabled by -default as it permits the PostgreSQL Operator to automate some advanced as well -as convenient behaviors, including: - -- Efficient provisioning of new replicas that are added to the PostgreSQL -cluster -- Preventing replicas from falling out of sync from the PostgreSQL primary by -allowing them to replay old WAL logs -- Allowing failed primaries to automatically and efficiently heal using the -"delta restore" feature -- Serving as the basis for the cluster cloning feature -- ...and of course, allowing for one to take full, differential, and incremental -backups and perform full and point-in-time restores - -Below is one example of how PGO manages backups with both a local storage and a Amazon S3 configuration. - -![PostgreSQL Operator pgBackRest Integration](/images/postgresql-cluster-dr-base.png) - -The PostgreSQL Operator leverages a pgBackRest repository to facilitate the -usage of the pgBackRest features in a PostgreSQL cluster. When a new PostgreSQL -cluster is created, it simultaneously creates a pgBackRest repository. 
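As a companion to the backup documentation above, here is a hedged sketch of how a repository with scheduled backups and a retention policy is commonly declared under `spec.backups.pgbackrest` on a PostgresCluster; the cron expressions, retention count, and storage request are illustrative assumptions rather than recommended values:

```yaml
spec:
  backups:
    pgbackrest:
      global:
        repo1-retention-full: "4"        # keep four full backups (illustrative count)
      repos:
        - name: repo1
          schedules:
            full: "0 1 * * 0"            # weekly full backup (illustrative cron)
            differential: "0 1 * * 1-6"  # daily differential backups (illustrative cron)
          volume:
            volumeClaimSpec:
              accessModes:
                - ReadWriteOnce
              resources:
                requests:
                  storage: 1Gi           # illustrative size
```

Staggering the `full` and `differential` schedules, as sketched here, also avoids the one-backup-at-a-time limitation noted below.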
- -You can store your pgBackRest backups in up to four different locations and using four different storage types: - -- Any Kubernetes supported storage class -- Amazon S3 (or S3 equivalents like MinIO) -- Google Cloud Storage (GCS) -- Azure Blob Storage - -The pgBackRest repository consists of the following Kubernetes objects: - -- A Deployment -- A Secret that contains information that is specific to the PostgreSQL cluster -that it is deployed with (e.g. SSH keys, AWS S3 keys, etc.) -- A Service - -The PostgreSQL primary is automatically configured to use the -`pgbackrest archive-push` and push the write-ahead log (WAL) archives to the -correct repository. - -## Backups - -PGO supports three types of pgBackRest backups: - -- Full (`full`): A full backup of all the contents of the PostgreSQL cluster -- Differential (`diff`): A backup of only the files that have changed since the -last full backup -- Incremental (`incr`): A backup of only the files that have changed since the -last full or differential backup - -## Scheduling Backups - -Any effective disaster recovery strategy includes having regularly scheduled -backups. PGO enables this by managing a series of Kubernetes CronJobs to ensure that backups are executed at scheduled times. - -Note that pgBackRest presently only supports taking one backup at a time. This may change in a future release, but for the time being we suggest that you stagger your backup times. - -Please see the [backup configuration tutorial]({{< relref "tutorial/backups.md" >}}) for how to set up backup schedules. - -## Restores - -The PostgreSQL Operator supports the ability to perform a full restore on a -PostgreSQL cluster as well as a point-in-time-recovery. There are two types of -ways to restore a cluster: - -- Restore to a new cluster -- Restore in-place - -For examples for this, please see the [disaster recovery tutorial]({{< relref "tutorial/disaster-recovery.md" >}}) - -### Setting Backup Retention Policies - -Unless specified, pgBackRest will keep an unlimited number of backups. You can specify backup retention using the `repoN-retention-` options. Please see the [backup configuration tutorial]({{< relref "tutorial/backups.md" >}}) for examples. - -## Deleting a Backup - -{{% notice warning %}} -If you delete a backup that is *not* set to expire, you may be unable to meet -your retention requirements. If you are deleting backups to free space, it is -recommended to delete your oldest backups first. -{{% /notice %}} - -A backup can be deleted by running the [`pgbackrest expire`](https://pgbackrest.org/command.html#command-expire) command directly on the pgBackRest repository Pod or a Postgres instance. diff --git a/docs/content/architecture/disaster-recovery.md b/docs/content/architecture/disaster-recovery.md deleted file mode 100644 index bb8c0a2436..0000000000 --- a/docs/content/architecture/disaster-recovery.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: "Disaster Recovery" -date: -draft: false -weight: 140 ---- - -![PostgreSQL Operator High-Availability Overview](/images/postgresql-ha-multi-data-center.png) - -Advanced [high-availability]({{< relref "architecture/high-availability.md" >}}) -and [backup management]({{< relref "architecture/backups.md" >}}) -strategies involve spreading your database clusters across multiple data centers -to help maximize uptime. In Kubernetes, this technique is known as "[federation](https://en.wikipedia.org/wiki/Federation_(information_technology))". 
-Federated Kubernetes clusters are able to communicate with each other, -coordinate changes, and provide resiliency for applications that have high -uptime requirements. - -As of this writing, federation in Kubernetes is still in ongoing development -and is something we monitor with intense interest. As Kubernetes federation -continues to mature, we wanted to provide a way to deploy PostgreSQL clusters -managed by the [PostgreSQL Operator](https://www.crunchydata.com/developers/download-postgres/containers/postgres-operator) -that can span multiple Kubernetes clusters. This can be accomplished with a -few environmental setups: - -- Two Kubernetes clusters -- An external storage system, using one of the following: - - S3, or an external storage system that uses the S3 protocol - - GCS - - Azure Blog Storage - - A Kubernetes storage system that can span multiple clusters - -At a high-level, the PostgreSQL Operator follows the "active-standby" data -center deployment model for managing the PostgreSQL clusters across Kubernetes -clusters. In one Kubernetes cluster, the PostgreSQL Operator deploy PostgreSQL as an -"active" PostgreSQL cluster, which means it has one primary and one-or-more -replicas. In another Kubernetes cluster, the PostgreSQL cluster is deployed as -a "standby" cluster: every PostgreSQL instance is a replica. - -A side-effect of this is that in each of the Kubernetes clusters, the PostgreSQL -Operator can be used to deploy both active and standby PostgreSQL clusters, -allowing you to mix and match! While the mixing and matching may not ideal for -how you deploy your PostgreSQL clusters, it does allow you to perform online -moves of your PostgreSQL data to different Kubernetes clusters as well as manual -online upgrades. - -Lastly, while this feature does extend high-availability, promoting a standby -cluster to an active cluster is **not** automatic. While the PostgreSQL clusters -within a Kubernetes cluster do support self-managed high-availability, a -cross-cluster deployment requires someone to specifically promote the cluster -from standby to active. - -## Standby Cluster Overview - -Standby PostgreSQL clusters are managed just like any other PostgreSQL cluster -that is managed by the PostgreSQL Operator. For example, adding replicas to a -standby cluster is identical as adding them to a primary cluster. - -As the architecture diagram above shows, the main difference is that there is -no primary instance: one PostgreSQL instance is reading in the database changes -from the backup repository, while the other replicas are replicas of that instance. -This is known as [cascading replication](https://www.postgresql.org/docs/current/warm-standby.html#CASCADING-REPLICATION). - replicas are cascading replicas, i.e. replicas replicating from a database server that itself is replicating from another database server. - -Because standby clusters are effectively read-only, certain functionality -that involves making changes to a database, e.g. PostgreSQL user changes, is -blocked while a cluster is in standby mode. Additionally, backups and restores -are blocked as well. While [pgBackRest](https://pgbackrest.org/) does support -backups from standbys, this requires direct access to the primary database, -which cannot be done until the PostgreSQL Operator supports Kubernetes -federation. 
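A minimal sketch of the standby configuration summarized in this overview, matching the `standby` fields of the CRD shown earlier in this diff; the repository name is an assumption and must refer to the pgBackRest repository shared with the active cluster:

```yaml
spec:
  standby:
    enabled: true
    repoName: repo1   # assumption: the repository the standby replays WAL from
```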
- -## Creating a Standby PostgreSQL Cluster - -For creating a standby Postgres cluster with PGO, please see the [disaster recovery tutorial]({{< relref "tutorial/disaster-recovery.md" >}}#standby-cluster) - -## Promoting a Standby Cluster - -There comes a time where a standby cluster needs to be promoted to an active -cluster. Promoting a standby cluster means that a PostgreSQL instance within -it will become a primary and start accepting both reads and writes. This has the -net effect of pushing WAL (transaction archives) to the pgBackRest repository, -so we need to take a few steps first to ensure we don't accidentally create a -split-brain scenario. - -First, if this is not a disaster scenario, you will want to "shutdown" the -active PostgreSQL cluster. This can be done by setting: - -``` -spec: - shutdown: true -``` - -The effect of this is that all the Kubernetes Statefulsets and Deployments for this cluster are -scaled to 0. - -We can then promote the standby cluster using the following: - -``` -spec: - standby: - enabled: false -``` - -This command essentially removes the standby configuration from the Kubernetes -cluster’s DCS, which triggers the promotion of the current standby leader to a -primary PostgreSQL instance. You can view this promotion in the PostgreSQL -standby leader's (soon to be active leader's) logs: - -With the standby cluster now promoted, the cluster with the original active -PostgreSQL cluster can now be turned into a standby PostgreSQL cluster. This is -done by deleting and recreating all PVCs for the cluster and re-initializing it -as a standby using the backup repository. Being that this is a destructive action -(i.e. data will only be retained if any Storage Classes and/or Persistent -Volumes have the appropriate reclaim policy configured) a warning is shown -when attempting to enable standby. - -The cluster will reinitialize from scratch as a standby, just -like the original standby that was created above. Therefore any transactions -written to the original standby, should now replicate back to this cluster. diff --git a/docs/content/architecture/high-availability.md b/docs/content/architecture/high-availability.md deleted file mode 100644 index 1d61ad42da..0000000000 --- a/docs/content/architecture/high-availability.md +++ /dev/null @@ -1,230 +0,0 @@ ---- -title: "High Availability" -date: -draft: false -weight: 110 ---- - -One of the great things about PostgreSQL is its reliability: it is very stable -and typically "just works." However, there are certain things that can happen in -the environment that PostgreSQL is deployed in that can affect its uptime, -including: - -- The database storage disk fails or some other hardware failure occurs -- The network on which the database resides becomes unreachable -- The host operating system becomes unstable and crashes -- A key database file becomes corrupted -- A data center is lost - -There may also be downtime events that are due to the normal case of operations, -such as performing a minor upgrade, security patching of operating system, -hardware upgrade, or other maintenance. - -Fortunately, PGO, the Postgres Operator from Crunchy Data, is prepared for this. - -![PostgreSQL Operator high availability Overview](/images/postgresql-ha-overview.png) - -The Crunchy PostgreSQL Operator supports a distributed-consensus based -high availability (HA) system that keeps its managed PostgreSQL clusters up and -running, even if the PostgreSQL Operator disappears. 
Additionally, it leverages -Kubernetes specific features such as -[Pod Anti-Affinity](#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity) -to limit the surface area that could lead to a PostgreSQL cluster becoming -unavailable. The PostgreSQL Operator also supports automatic healing of failed -primaries and leverages the efficient pgBackRest "delta restore" method, which -eliminates the need to fully reprovision a failed cluster! - -The Crunchy PostgreSQL Operator also maintains high availability during a -routine task such as a PostgreSQL minor version upgrade. - -For workloads that are sensitive to transaction loss, PGO supports PostgreSQL synchronous replication. - -The high availability backing for your PostgreSQL cluster is only as good as -your high availability backing for Kubernetes. To learn more about creating a -[high availability Kubernetes cluster](https://kubernetes.io/docs/tasks/administer-cluster/highly-available-master/), -please review the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/highly-available-master/) -or consult your systems administrator. - -## The Crunchy Postgres Operator High Availability Algorithm - -A critical aspect of any production-grade PostgreSQL deployment is a reliable -and effective high availability (HA) solution. Organizations want to know that -their PostgreSQL deployments can remain available despite various issues that -have the potential to disrupt operations, including hardware failures, network -outages, software errors, or even human mistakes. - -The key portion of high availability that the PostgreSQL Operator provides is -that it delegates the management of HA to the PostgreSQL clusters themselves. -This ensures that the PostgreSQL Operator is not a single-point of failure for -the availability of any of the PostgreSQL clusters that it manages, as the -PostgreSQL Operator is only maintaining the definitions of what should be in the -cluster (e.g. how many instances in the cluster, etc.). - -Each HA PostgreSQL cluster maintains its availability using concepts that come -from the [Raft algorithm](https://raft.github.io/) to achieve distributed -consensus. The Raft algorithm ("Reliable, Replicated, Redundant, -Fault-Tolerant") was developed for systems that have one "leader" (i.e. a -primary) and one-to-many followers (i.e. replicas) to provide the same fault -tolerance and safety as the PAXOS algorithm while being easier to implement. - -For the PostgreSQL cluster group to achieve distributed consensus on who the -primary (or leader) is, each PostgreSQL cluster leverages the distributed etcd -key-value store that is bundled with Kubernetes. After it is elected as the -leader, a primary will place a lock in the distributed etcd cluster to indicate -that it is the leader. The "lock" serves as the method for the primary to -provide a heartbeat: the primary will periodically update the lock with the -latest time it was able to access the lock. As long as each replica sees that -the lock was updated within the allowable automated failover time, the replicas -will continue to follow the leader. - -The "log replication" portion that is defined in the Raft algorithm is handled -by PostgreSQL in two ways. First, the primary instance will replicate changes to -each replica based on the rules set up in the provisioning process. 
For -PostgreSQL clusters that leverage "synchronous replication," a transaction is -not considered complete until all changes from those transactions have been sent -to all replicas that are subscribed to the primary. - -In the above section, note the key word that the transaction are sent to each -replica: the replicas will acknowledge receipt of the transaction, but they may -not be immediately replayed. We will address how we handle this further down in -this section. - -During this process, each replica keeps track of how far along in the recovery -process it is using a "log sequence number" (LSN), a built-in PostgreSQL serial -representation of how many logs have been replayed on each replica. For the -purposes of HA, there are two LSNs that need to be considered: the LSN for the -last log received by the replica, and the LSN for the changes replayed for the -replica. The LSN for the latest changes received can be compared amongst the -replicas to determine which one has replayed the most changes, and an important -part of the automated failover process. - -The replicas periodically check in on the lock to see if it has been updated by -the primary within the allowable automated failover timeout. Each replica checks -in at a randomly set interval, which is a key part of Raft algorithm that helps -to ensure consensus during an election process. If a replica believes that the -primary is unavailable, it becomes a candidate and initiates an election and -votes for itself as the new primary. A candidate must receive a majority of -votes in a cluster in order to be elected as the new primary. - -There are several cases for how the election can occur. If a replica believes -that a primary is down and starts an election, but the primary is actually not -down, the replica will not receive enough votes to become a new primary and will -go back to following and replaying the changes from the primary. - -In the case where the primary is down, the first replica to notice this starts -an election. Per the Raft algorithm, each available replica compares which one -has the latest changes available, based upon the LSN of the latest logs -received. The replica with the latest LSN wins and receives the vote of the -other replica. The replica with the majority of the votes wins. In the event -that two replicas' logs have the same LSN, the tie goes to the replica that -initiated the voting request. - -Once an election is decided, the winning replica is immediately promoted to be a -primary and takes a new lock in the distributed etcd cluster. If the new primary -has not finished replaying all of its transactions logs, it must do so in order -to reach the desired state based on the LSN. Once the logs are finished being -replayed, the primary is able to accept new queries. - -At this point, any existing replicas are updated to follow the new primary. - -When the old primary tries to become available again, it realizes that it has -been deposed as the leader and must be healed. The old primary determines what -kind of replica it should be based upon the CRD, which allows it to set itself -up with appropriate attributes. It is then restored from the pgBackRest backup -archive using the "delta restore" feature, which heals the instance and makes it -ready to follow the new primary, which is known as "auto healing." 
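To make the failover timing discussed above concrete, here is a minimal sketch (not a complete manifest) of how these knobs are commonly surfaced on the `postgresclusters` custom resource. The field names and default values shown are assumptions based on the Patroni settings described in this section; verify them against the CRD reference for your PGO version.

```yaml
spec:
  patroni:
    # Assumed field: how long the leader lock in the DCS stays valid before
    # replicas may start an election (the "allowable automated failover time").
    leaderLeaseDurationSeconds: 30
    # Assumed field: how often each instance checks in on / renews the lock.
    syncPeriodSeconds: 10
    dynamicConfiguration:
      # Optional: request synchronous replication, so a transaction is not
      # acknowledged until a synchronous replica confirms it (see below).
      synchronous_mode: true
```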
- -## How The Crunchy PostgreSQL Operator Uses Pod Anti-Affinity - -Kubernetes has two types of Pod anti-affinity: - -- Preferred: With preferred (`preferredDuringSchedulingIgnoredDuringExecution`) Pod anti-affinity, Kubernetes will make a best effort to schedule Pods matching the anti-affinity rules to different Nodes. However, if it is not possible to do so, then Kubernetes may schedule one or more Pods to the same Node. -- Required: With required (`requiredDuringSchedulingIgnoredDuringExecution`) Pod anti-affinity, Kubernetes mandates that each Pod matching the anti-affinity rules **must** be scheduled to different Nodes. However, a Pod may not be scheduled if Kubernetes cannot find a Node that does not contain a Pod matching the rules. - -There is a tradeoff with these two types of pod anti-affinity: while "required" anti-affinity will ensure that all the matching Pods are scheduled on different Nodes, if Kubernetes cannot find an available Node, your Postgres instance may not be scheduled. Likewise, while "preferred" anti-affinity will make a best effort to schedule your Pods on different Nodes, Kubernetes may compromise and schedule more than one Postgres instance of the same cluster on the same Node. - -By understanding these tradeoffs, the makeup of your Kubernetes cluster, and your requirements, you can choose the method that makes the most sense for your Postgres deployment. We'll show examples of both methods below! - -For an example of how pod anti-affinity works with PGO, please see the [high availability tutorial]({{< relref "tutorial/high-availability.md" >}}#pod-anti-affinity). - -## Synchronous Replication: Guarding Against Transaction Loss - -Clusters managed by the Crunchy PostgreSQL Operator can be deployed with -synchronous replication, which is useful for workloads that are sensitive to -losing transactions, as PostgreSQL will not consider a transaction to be -committed until it is committed to all synchronous replicas connected to a -primary. This provides a higher guarantee of data consistency and, when a -healthy synchronous replica is present, a guarantee of the most up-to-date data -during a failover event. - -This comes at a cost of performance: PostgreSQL has to wait for -a transaction to be committed on all synchronous replicas, and a connected client -will have to wait longer than if the transaction only had to be committed on the -primary (which is how asynchronous replication works). Additionally, there is a -potential impact to availability: if a synchronous replica crashes, any writes -to the primary will be blocked until a replica is promoted to become a new -synchronous replica of the primary. - -## Node Affinity - -Kubernetes [Node Affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity) -can be used to schedule Pods to specific Nodes within a Kubernetes cluster. -This can be useful when you want your PostgreSQL instances to take advantage of -specific hardware (e.g. for geospatial applications) or if you want to have a -replica instance deployed to a specific region within your Kubernetes cluster -for high availability purposes. - -For an example of how node affinity works with PGO, please see the [high availability tutorial]({{< relref "tutorial/high-availability.md" >}}#node-affinity). - -## Tolerations - -Kubernetes [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) -can help with the scheduling of Pods to appropriate nodes.
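As a quick illustration before the details that follow, here is a minimal sketch of setting tolerations on an instance set in the `postgresclusters` spec. The taint key, value, and effect are illustrative only; use whatever taints your administrator has applied to the target Nodes.

```yaml
spec:
  instances:
    - name: instance1
      # Allow these Postgres Pods to be scheduled onto Nodes that carry a
      # matching taint. The key/value below are examples, not real taints.
      tolerations:
        - key: app
          operator: Equal
          value: postgres
          effect: NoSchedule
```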
There are many -reasons that a Kubernetes administrator may want to use tolerations, such as -restricting the types of Pods that can be assigned to particular Nodes. -Reasoning and strategy for using taints and tolerations is outside the scope of -this documentation. - -You can configure the tolerations for your Postgres instances on the `postgresclusters` custom resource. - -## Rolling Updates - -During the lifecycle of a PostgreSQL cluster, there are certain events that may -require a planned restart, such as an update to a "restart required" PostgreSQL -configuration setting (e.g. [`shared_buffers`](https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS)) -or a change to a Kubernetes Deployment template (e.g. [changing the memory request]({{< relref "tutorial/resize-cluster.md">}}#customize-cpu-memory)). Restarts can be disruptive in a high availability deployment, which is -why many setups employ a ["rolling update" strategy](https://kubernetes.io/docs/tutorials/kubernetes-basics/update/update-intro/) -(aka a "rolling restart") to minimize or eliminate downtime during a planned -restart. - -Because PostgreSQL is a stateful application, a simple rolling restart strategy -will not work: PostgreSQL needs to ensure that there is a primary available that -can accept reads and writes. This requires following a method that will minimize -the amount of downtime when the primary is taken offline for a restart. - -The PostgreSQL Operator uses the following algorithm to perform the rolling restart to minimize any potential interruptions: - -1. Each replica is updated in sequential order. This follows the following -process: - - 1. The replica is explicitly shut down to ensure any outstanding changes are - flushed to disk. - - 2. If requested, the PostgreSQL Operator will apply any changes to the - Deployment. - - 3. The replica is brought back online. The PostgreSQL Operator waits for the - replica to become available before it proceeds to the next replica. - -2. The above steps are repeated until all of the replicas are restarted. - -3. A controlled switchover is performed. The PostgreSQL Operator determines -which replica is the best candidate to become the new primary. It then demotes -the primary to become a replica and promotes the best candidate to become the -new primary. - -4. The former primary follows a process similar to what is described in step 1. - -The downtime is thus constrained to the amount of time the switchover takes. - -PGO will automatically detect when to apply a rolling update. diff --git a/docs/content/architecture/monitoring.md b/docs/content/architecture/monitoring.md deleted file mode 100644 index c4312ac9d4..0000000000 --- a/docs/content/architecture/monitoring.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -title: "Monitoring" -date: -draft: false -weight: 130 ---- - -![PostgreSQL Operator Monitoring](/images/postgresql-monitoring.png) - -While having [high availability]({{< relref "architecture/high-availability.md" >}}), -[backups]({{< relref "architecture/backups.md" >}}), an disaster recovery systems in place helps in the event of something going wrong with your -PostgreSQL cluster, monitoring helps you anticipate problems before they happen. -Additionally, monitoring can help you diagnose and resolve additional issues -that may not result in downtime, but cause degraded performance. - -There are many different ways to monitor systems within Kubernetes, including -tools that come with Kubernetes itself. 
This is by no means a -comprehensive guide on how to monitor everything in Kubernetes, but rather a description of what the -PostgreSQL Operator provides to give you an -[out-of-the-box monitoring solution]({{< relref "installation/monitoring/_index.md" >}}). - -## Getting Started - -If you want to install the metrics stack, please visit the [installation]({{< relref "installation/monitoring/_index.md" >}}) -instructions for the [PostgreSQL Operator Monitoring]({{< relref "installation/monitoring/_index.md" >}}) -stack. - -## Components - -The [PostgreSQL Operator Monitoring]({{< relref "installation/monitoring/_index.md" >}}) -stack is made up of several open source components: - -- [pgMonitor](https://github.com/CrunchyData/pgmonitor), which provides the core -of the monitoring infrastructure including the following components: - - [postgres_exporter](https://github.com/CrunchyData/pgmonitor/tree/master/exporter/postgres), - which provides queries used to collect metrics information about a PostgreSQL - instance. - - [Prometheus](https://github.com/prometheus/prometheus), a time-series - database that scrapes and stores the collected metrics so they can be consumed - by other services. - - [Grafana](https://github.com/grafana/grafana), a visualization tool that - provides charting and other capabilities for viewing the collected monitoring - data. - - [Alertmanager](https://github.com/prometheus/alertmanager), a tool that - can send alerts when metrics hit a certain threshold that requires someone to - intervene. -- [pgnodemx](https://github.com/CrunchyData/pgnodemx), a PostgreSQL extension -that is able to pull container-specific metrics (e.g. CPU utilization, memory -consumption) from the container itself via SQL queries. - -## Visualizations - -Below is a brief description of all the visualizations provided by the -[PostgreSQL Operator Monitoring]({{< relref "installation/monitoring/_index.md" >}}) -stack. Some of the descriptions may include some directional guidance on how to -interpret the charts, though this is only to provide a starting point: actual -causes and effects of issues can vary between systems. - -Many of the visualizations can be broken down based on the following groupings: - -- Cluster: which PostgreSQL cluster should be viewed -- Pod: the specific Pod or PostgreSQL instance - -### Overview - -![PostgreSQL Operator Monitoring - Overview](/images/postgresql-monitoring-overview.png) - -The overview provides a summary of all of the PostgreSQL clusters that are -being monitored by the PostgreSQL Operator Monitoring stack. This includes the -following information: - -- The name of the PostgreSQL cluster and the namespace that it is in -- The type of PostgreSQL cluster (HA [high availability] or standalone) -- The status of the cluster, as indicated by color. Green indicates the cluster -is available, red indicates that it is not. - -Each entry is clickable to provide additional cluster details. - -### PostgreSQL Details - -![PostgreSQL Operator Monitoring - Cluster Details](/images/postgresql-monitoring.png) - -The PostgreSQL Details view provides more information about a specific -PostgreSQL cluster that is being managed and monitored by the PostgreSQL -Operator. These include many key PostgreSQL-specific metrics that help make -decisions around managing a PostgreSQL cluster. These include: - -- Backup Status: The last time a backup was taken of the cluster. Green is good. -Orange means that a backup has not been taken in more than a day and may warrant -investigation.
- -- Active Connections: How many clients are connected to the database. Too many -clients connected could impact performance and, for values approaching 100%, can -lead to clients being unable to connect. -- Idle in Transaction: How many clients have a connection state of "idle in -transaction". Too many clients in this state can cause performance issues and, -in certain cases, maintenance issues. -- Idle: How many clients are connected but are in an "idle" state. -- TPS: The number of "transactions per second" that are occurring. Usually needs -to be combined with another metric to help with analysis. "Higher is better" -when performing benchmarking. -- Connections: An aggregated view of active, idle, and idle in transaction -connections. -- Database Size: How large databases are within a PostgreSQL cluster. Typically -combined with another metric for analysis. Helps keep track of overall disk -usage and if any triage steps need to occur around PVC size. -- WAL Size: How much space write-ahead logs (WAL) are taking up on disk. This -can contribute to extra space being used on your data disk, or can give you an -indication of how much space is being utilized on a separate WAL PVC. If you -are using replication slots, this can help indicate if a slot is not being -acknowledged if the numbers are much larger than the `max_wal_size` setting (the -PostgreSQL Operator does not use slots by default). -- Row Activity: The number of rows that are selected, inserted, updated, and -deleted. This can help you determine what percentage of your workload is read -vs. write, and help make database tuning decisions based on that, in conjunction -with other metrics. -- Replication Status: Provides guidance information on how much replication lag -there is between primary and replica PostgreSQL instances, both in bytes and -time. This can provide an indication of how much data could be lost in the event -of a failover. - -![PostgreSQL Operator Monitoring - Cluster Details 2](/images/postgresql-monitoring-cluster.png) - -- Conflicts / Deadlocks: These occur when PostgreSQL is unable to complete -operations, which can result in transaction loss. The goal is for these numbers -to be `0`. If these are occurring, check your data access and writing patterns. -- Cache Hit Ratio: A measure of how much of the "working data", e.g. data that -is being accessed and manipulated, resides in memory. This is used to understand -how much PostgreSQL is having to utilize the disk. The target for this number -should be as high as possible. How to achieve this is the subject of books, but -it certainly takes effort based on how your applications use PostgreSQL. -- Buffers: The buffer usage of various parts of the PostgreSQL system. This can -be used to help understand the overall throughput between various parts of the -system. -- Commit & Rollback: How many transactions are committed and rolled back. -- Locks: The number of locks that are present on a given system. - -### Pod Details - -![PostgreSQL Operator Monitoring - Pod Details](/images/postgresql-monitoring-pod.png) - -Pod details provide information about a given Pod or Pods that are being used -by a PostgreSQL cluster. These are similar to "operating system" or "node" -metrics, with the difference that these are looking at resource utilization by -a container, not the entire node. - -It may be helpful to view these metrics on a "pod" basis, by using the Pod -filter at the top of the dashboard. - -- Disk Usage: How much space is being consumed by a volume.
- -- Disk Activity: How many reads and writes are occurring on a volume. -- Memory: Various information about memory utilization, including the request -and limit as well as actual utilization. -- CPU: The amount of CPU being utilized by a Pod. -- Network Traffic: The amount of networking traffic passing through each network -device. -- Container Resources: The CPU and memory limits and requests. - -### Backups - -![PostgreSQL Operator - Monitoring - Backup Health](/images/postgresql-monitoring-backups.png) - -There are a variety of reasons why you need to monitor your backups, starting -from answering the fundamental question of "do I have backups available?" -Backups can be used for a variety of situations, from cloning new clusters to -restoring clusters after a disaster. Additionally, Postgres can run into issues -if your backup repository is not healthy, e.g. if it cannot push WAL archives. -If your backups are set up properly and healthy, you will be set up to mitigate -the risk of data loss! - -The backup, or pgBackRest, panel will provide information about the overall -state of your backups. This includes: - -- Recovery Window: This is an indicator of how far back you are able to restore -your data from. This represents all of the backups and archives available in -your backup repository. Typically, your recovery window should be close to your -overall data retention specifications. -- Time Since Last Backup: This indicates how long it has been since your last -backup. This is broken down into pgBackRest backup type (full, incremental, -differential) as well as time since the last WAL archive was pushed. -- Backup Runtimes: How long the last backup of a given type (full, incremental, -differential) took to execute. If your backups are slow, consider providing more -resources to the backup jobs and tweaking pgBackRest's performance tuning -settings. -- Backup Size: How large the backups of a given type (full, incremental, -differential) are. -- WAL Stats: Shows the metrics around WAL archive pushes. If you have failing -pushes, you should see if there is a transient or permanent error that is -preventing WAL archives from being pushed. If left untreated, this could end up -causing issues for your Postgres cluster. - -### PostgreSQL Service Health Overview - -![PostgreSQL Operator Monitoring - Service Health Overview](/images/postgresql-monitoring-service.png) - -The Service Health Overview provides information about the Kubernetes Services -that sit in front of the PostgreSQL Pods. This provides information about the -status of the network. - -- Saturation: How much of the available network to the Service is being -consumed. High saturation may cause degraded performance to clients or create -an inability to connect to the PostgreSQL cluster. -- Traffic: Displays the number of transactions per minute that the Service is -handling. -- Errors: Displays the total number of errors occurring at a particular Service. -- Latency: What the overall network latency is when interfacing with the -Service. - -### Query Runtime - -![PostgreSQL Operator Monitoring - Query Performance](/images/postgresql-monitoring-query-total.png) - -Looking at the overall performance of queries can help optimize a Postgres -deployment, both from [providing resources]({{< relref "tutorial/customize-cluster.md" >}}) to query tuning in the application -itself.
- -You can get a sense of the overall activity of a PostgreSQL cluster from the -chart that is visualized above: - -- Queries Executed: The total number of queries executed on a system during the -period. -- Query runtime: The aggregate runtime of all the queries combined across the -system that were executed in the period. -- Query mean runtime: The average query time across all queries executed on the -system in the given period. -- Rows retrieved or affected: The total number of rows in a database that were -either retrieved or had modifications made to them. - -PostgreSQL Operator Monitoring also further breaks down the queries so you can -identify queries that are being executed too frequently or are taking up too -much time. - -![PostgreSQL Operator Monitoring - Query Analysis](/images/postgresql-monitoring-query-topn.png) - -- Query Mean Runtime (Top N): This highlights the N slowest queries by -average runtime on the system. This might indicate you are missing an index -somewhere, or perhaps the query could be rewritten to be more efficient. -- Query Max Runtime (Top N): This highlights the N slowest queries by -absolute runtime. This could indicate that a specific query or the system as a -whole may need more resources. -- Query Total Runtime (Top N): This highlights the N slowest queries by -aggregate runtime. This could indicate that an ORM is looping over a single query -and executing it many times; such a query could possibly be rewritten as a single, faster -query. - -### Alerts - -![PostgreSQL Operator Monitoring - Alerts](/images/postgresql-monitoring-alerts.png) - -Alerting lets one view and receive alerts about actions that require -intervention, for example, an HA cluster that cannot self-heal. The alerting -system is powered by [Alertmanager](https://github.com/prometheus/alertmanager). - -The alerts that come installed by default include: - -- `PGExporterScrapeError`: The Crunchy PostgreSQL Exporter is having issues -scraping statistics used as part of the monitoring stack. -- `PGIsUp`: A PostgreSQL instance is down. -- `PGIdleTxn`: There are too many connections that are in the -"idle in transaction" state. -- `PGQueryTime`: A single PostgreSQL query is taking too long to run. Issues a -warning at 12 hours and goes critical after 24. -- `PGConnPerc`: Indicates that there are too many connection slots being used. -Issues a warning at 75% and goes critical above 90%. -- `PGDiskSize`: Indicates that a PostgreSQL database is too large and could be in -danger of running out of disk space. Issues a warning at 75% and goes critical -at 90%. -- `PGReplicationByteLag`: Indicates that a replica is too far behind a primary -instance, which could risk data loss in a failover scenario. Issues a warning at -50MB and goes critical at 100MB. -- `PGReplicationSlotsInactive`: Indicates that a replication slot is inactive. -Not attending to this can lead to out-of-disk errors. -- `PGXIDWraparound`: Indicates that a PostgreSQL instance is nearing transaction -ID wraparound. Issues a warning at 50% and goes critical at 75%. It's important -that you [vacuum your database](https://info.crunchydata.com/blog/managing-transaction-id-wraparound-in-postgresql) -to prevent this. -- `PGEmergencyVacuum`: Indicates that autovacuum is not running or cannot keep -up with ongoing changes, i.e. it's past its "freeze" age. Issues a warning at -110% and goes critical at 125%.
- -- `PGArchiveCommandStatus`: Indicates that the archive command, which is used -to ship WAL archives to pgBackRest, is failing. -- `PGSequenceExhaustion`: Indicates that a sequence is over 75% used. -- `PGSettingsPendingRestart`: Indicates that there are settings changed on a -PostgreSQL instance that require a restart. - -Optional alerts that can be enabled: - -- `PGMinimumVersion`: Indicates if PostgreSQL is below a desired version. -- `PGRecoveryStatusSwitch_Replica`: Indicates that a replica has been promoted -to a primary. -- `PGConnectionAbsent_Prod`: Indicates that metrics collection is absent from a -PostgreSQL instance. -- `PGSettingsChecksum`: Indicates that PostgreSQL settings have changed from a -previous state. -- `PGDataChecksum`: Indicates that there are data checksum failures on a -PostgreSQL instance. This could be a sign of data corruption. - -You can modify these alerts as you see fit, and add your own alerts as well! -Please see the [installation instructions]({{< relref "installation/monitoring/_index.md" >}}) -for general setup of the PostgreSQL Operator Monitoring stack. diff --git a/docs/content/architecture/overview.md b/docs/content/architecture/overview.md deleted file mode 100644 index 9ec1fb4c3b..0000000000 --- a/docs/content/architecture/overview.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: "Overview" -date: -draft: false -weight: 100 ---- - -The goal of PGO, the Postgres Operator from Crunchy Data, is to provide a means to quickly get -your applications up and running on Postgres for both development and -production environments. To understand how PGO does this, we -want to give you a tour of its architecture, which explains both the architecture -of the PostgreSQL Operator itself as well as recommended deployment models for -PostgreSQL in production! - -# PGO Architecture - -The Crunchy PostgreSQL Operator extends Kubernetes to provide a higher-level -abstraction for rapid creation and management of PostgreSQL clusters. The -Crunchy PostgreSQL Operator leverages a Kubernetes concept referred to as -"[Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)" -to create several -[custom resource definitions (CRDs)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions) -that allow for the management of PostgreSQL clusters. - -The main custom resource definition is [`postgresclusters.postgres-operator.crunchydata.com`]({{< relref "references/crd.md" >}}). This allows you to control all the information about a Postgres cluster, including: - -- General information -- Resource allocation -- High availability -- Backup management -- Where and how it is deployed (affinity, tolerations) -- Disaster Recovery / standby clusters -- Monitoring - -and more. - -PGO itself runs as a Deployment and is composed of a single container. - -- `operator` (image: postgres-operator) - This is the heart of the PostgreSQL -Operator. It contains a series of Kubernetes -[controllers](https://kubernetes.io/docs/concepts/architecture/controller/) that -place watch events on a series of native Kubernetes resources (Jobs, Pods) as -well as the Custom Resources that come with the PostgreSQL Operator (e.g. -`PostgresCluster`). - -The main purpose of PGO is to create and update information -around the structure of a Postgres Cluster, and to relay information about the -overall status and health of a PostgreSQL cluster. The goal is to also simplify -this process as much as possible for users.
For example, let's say we want to -create a high-availability PostgreSQL cluster that has a single replica, -supports backups in both a local storage area and Amazon S3, and has -built-in metrics and connection pooling, similar to: - -![PostgreSQL HA Cluster](/images/postgresql-cluster-ha-s3.png) - -This can be accomplished with a relatively simple manifest. Please refer to the [tutorial]({{< relref "tutorial/_index.md" >}}) for how to accomplish this, or see the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo; a minimal sketch of such a manifest also appears at the end of this page. - -The Postgres Operator handles setting up all of the various StatefulSets, Deployments, Services, and other Kubernetes objects. - -You will also notice that **high-availability is enabled by default** if you deploy at least one Postgres replica. The -Crunchy PostgreSQL Operator uses a distributed-consensus method for PostgreSQL -cluster high-availability, and as such delegates the management of each -cluster's availability to the clusters themselves. This removes the PostgreSQL -Operator from being a single-point-of-failure, and has benefits such as faster -recovery times for each PostgreSQL cluster. For a detailed discussion on -high-availability, please see the [High-Availability]({{< relref "architecture/high-availability.md" >}}) -section. - -## Kubernetes StatefulSets: The PGO Deployment Model - -PGO, the Postgres Operator from Crunchy Data, uses [Kubernetes StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) -for running Postgres instances, and will use [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) for more ephemeral services. - -PGO deploys Kubernetes StatefulSets in a way that allows it to create distinct Postgres instance groups and to support advanced operations, such as rolling updates, that minimize or eliminate Postgres downtime. Additional components in our -PostgreSQL cluster, such as the pgBackRest repository or an optional pgBouncer, -are deployed with Kubernetes Deployments. - -With the PGO architecture, we can also leverage StatefulSets to apply affinity and toleration rules across every Postgres instance or individual ones. For instance, we may want to force one or more of our PostgreSQL replicas to run on Nodes in a different region than -our primary PostgreSQL instances. - -What's great about this is that PGO manages this for you so you don't have to worry! Being aware of -this model can help you understand how the Postgres Operator gives you maximum -flexibility for your PostgreSQL clusters while giving you the tools to -troubleshoot issues in production. - -The last piece of this model is the use of [Kubernetes Services](https://kubernetes.io/docs/concepts/services-networking/service/) -for accessing your PostgreSQL clusters and their various components. The -PostgreSQL Operator puts services in front of each Deployment to ensure you have -a known, consistent means of accessing your PostgreSQL components. - -Note that in some production environments, there can be delays in accessing -Services during transition events. The PostgreSQL Operator attempts to mitigate -delays during critical operations (e.g. failover, restore, etc.) by directly -accessing the Kubernetes Pods to perform given actions. - -# Additional Architecture Information - -There is certainly a lot to unpack in the overall architecture of PGO. Understanding the architecture will help you to plan -the deployment model that is best for your environment.
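For reference, here is a minimal sketch of the kind of manifest referred to above. It is illustrative rather than exhaustive: the Postgres version and storage sizes are placeholders, image fields are omitted (they may need to be set explicitly depending on how PGO was installed), and the complete set of options lives in the CRD reference.

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo
spec:
  postgresVersion: 14        # placeholder version
  instances:
    - name: instance1
      replicas: 2            # more than one instance enables high availability
      dataVolumeClaimSpec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi     # placeholder size
  backups:
    pgbackrest:
      repos:
        - name: repo1
          volume:
            volumeClaimSpec:
              accessModes: ["ReadWriteOnce"]
              resources:
                requests:
                  storage: 1Gi
```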
For more information on -the architectures of various components of the PostgreSQL Operator, please read -onward! diff --git a/docs/content/architecture/user-management.md b/docs/content/architecture/user-management.md deleted file mode 100644 index ad7829241b..0000000000 --- a/docs/content/architecture/user-management.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "User Management" -date: -draft: false -weight: 125 ---- - -PGO manages PostgreSQL users that you define in [`PostgresCluster.spec.users`]({{< relref "/references/crd#postgresclusterspecusersindex" >}}). -There, you can list their [role attributes](https://www.postgresql.org/docs/current/role-attributes.html) and which databases they can access. - -Below is some information on how the user and database management systems work. To try out some examples, please see the [user and database management]({{< relref "tutorial/user-management.md" >}}) section of the [tutorial]({{< relref "tutorial/_index.md" >}}). - -## Understanding Default User Management - -When you create a Postgres cluster with PGO and do not specify any additional users or databases, PGO will do the following: - -- Create a database that matches the name of the Postgres cluster. -- Create an unprivileged Postgres user with the name of the cluster. This user has access to the database created in the previous step. -- Create a Secret with the login credentials and connection details for the Postgres user in relation to the database. This is stored in a Secret named `<clusterName>-pguser-<userName>`. These credentials include: - - `user`: The name of the user account. - - `password`: The password for the user account. - - `dbname`: The name of the database that the user has access to by default. - - `host`: The name of the host of the database. This references the [Service](https://kubernetes.io/docs/concepts/services-networking/service/) of the primary Postgres instance. - - `port`: The port that the database is listening on. - - `uri`: A [PostgreSQL connection URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) that provides all the information for logging into the Postgres database. - -You can see this default behavior in the [connect to a cluster]({{< relref "tutorial/connect-cluster.md" >}}) portion of the tutorial. - -As an example, using our `hippo` Postgres cluster, we would see the following created: - -- A database named `hippo`. -- A Postgres user named `hippo`. -- A Secret named `hippo-pguser-hippo` that contains the user credentials and connection information. - -While the above defaults may work for your application, there are certain cases where you may need to customize your user and databases: - -- You may require access to the `postgres` superuser. -- You may need to define privileges for your users. -- You may need multiple databases in your cluster, e.g. in a multi-tenant application. -- Certain users may only be able to access certain databases. - -## Custom Users and Databases - -Users and databases can be customized in the `spec.users` section of the custom resource. These can be added during cluster creation and adjusted over time, but it's important to note the following: - -- If `spec.users` is set during cluster creation, PGO will **not** create any default users or databases except for `postgres`. If you want additional databases, you will need to specify them. -- For any users added in `spec.users`, PGO will create a Secret of the format `<clusterName>-pguser-<userName>`. This will contain the user credentials.
- - If no databases are specified, `dbname` and `uri` will not be present in the Secret. - - If at least one `spec.users.databases` is specified, the first database in the list will be populated into the connection credentials. -- To prevent accidental data loss, PGO will not automatically drop users. We will see how to drop a user below. -- Similarly, to prevent accidental data loss PGO will not automatically drop databases. We will see how to drop a database below. -- Role attributes are not automatically dropped if you remove them. You will have to set the inverse attribute to drop them (e.g. `NOSUPERUSER`). -- The special `postgres` user can be added as one of the custom users; however, the privileges of the users cannot be adjusted. - -For specific examples for how to manage users, please see the [user and database management]({{< relref "tutorial/user-management.md" >}}) section of the [tutorial]({{< relref "tutorial/_index.md" >}}). diff --git a/docs/content/faq/_index.md b/docs/content/faq/_index.md deleted file mode 100644 index 910e217a68..0000000000 --- a/docs/content/faq/_index.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "FAQ" -date: -draft: false -weight: 105 ---- - -## Project FAQ - -### What is The PGO Project? - -The PGO Project is the open source project associated with the development of [PGO](https://github.com/CrunchyData/postgres-operator), the [Postgres Operator](https://github.com/CrunchyData/postgres-operator) for Kubernetes from [Crunchy Data](https://www.crunchydata.com). - -PGO is a [Kubernetes Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/), providing a declarative solution for managing your PostgreSQL clusters. Within a few moments, you can have a Postgres cluster complete with high availability, disaster recovery, and monitoring, all over secure TLS communications. - -PGO is the upstream project from which [Crunchy PostgreSQL for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) is derived. You can find more information on Crunchy PostgreSQL for Kubernetes [here](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/). - -### What’s the difference between PGO and Crunchy PostgreSQL for Kubernetes? - -PGO is the Postgres Operator from Crunchy Data. It developed pursuant to the PGO Project and is designed to be a frequently released, fast-moving project where all new development happens. - -[Crunchy PostgreSQL for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) is produced by taking selected releases of PGO, combining them with Crunchy Certified PostgreSQL and PostgreSQL containers certified by Crunchy Data, maintained for commercial support, and made available to customers as the Crunchy PostgreSQL for Kubernetes offering. - -### Where can I find support for PGO? - -The community can help answer questions about PGO via the [PGO mailing list](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join). - -Information regarding support for PGO is available in the [Support]({{< relref "support/_index.md" >}}) section of the PGO documentation, which you can find [here]({{< relref "support/_index.md" >}}). - -For additional information regarding commercial support and Crunchy PostgreSQL for Kubernetes, you can [contact Crunchy Data](https://www.crunchydata.com/contact/). - -### Under which open source license is PGO source code available? 
- -The PGO source code is available under the [Apache License 2.0](https://github.com/CrunchyData/postgres-operator/blob/master/LICENSE.md). - -### How can I get involved with the PGO Project? - -PGO is developed by the PGO Project. The PGO Project that welcomes community engagement and contribution. - -The PGO source code and community issue trackers are hosted at [GitHub](https://github.com/CrunchyData/postgres-operator). - -For community questions and support, please sign up for the [PGO mailing list](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join). - -For information regarding contribution, please review the contributor guide [here](https://github.com/CrunchyData/postgres-operator/blob/master/CONTRIBUTING.md). - -Please register for the [Crunchy Data Developer Portal mailing list](https://www.crunchydata.com/developers/newsletter) to receive updates regarding Crunchy PostgreSQL for Kubernetes releases and the [Crunchy Data newsletter](https://www.crunchydata.com/newsletter/) for general updates from Crunchy Data. - -### Where do I report a PGO bug? - -The PGO Project uses GitHub for its [issue tracking](https://github.com/CrunchyData/postgres-operator/issues/new/choose). You can file your issue [here](https://github.com/CrunchyData/postgres-operator/issues/new/choose). - -### How often is PGO released? - -The PGO team currently plans to release new builds approximately every few weeks. The PGO team will flag certain builds as “stable” at their discretion. Note that the term “stable” does not imply fitness for production usage or any kind of warranty whatsoever. diff --git a/docs/content/installation/_index.md b/docs/content/installation/_index.md deleted file mode 100644 index add5679273..0000000000 --- a/docs/content/installation/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "Installation" -date: -draft: false -weight: 30 ---- - -This section provides detailed instructions for anything and everything related to installing PGO -in your Kubernetes environment. This includes instructions for installing PGO according to a -variety of supported installation methods, along with information for customizing the installation -of PGO according your specific needs. - -Additionally, instructions are provided for installing and configuring [PGO Monitoring]({{< relref "./monitoring" >}}). - -## Installing PGO - -- [PGO Kustomize Install]({{< relref "./kustomize.md" >}}) -- [PGO Helm Install]({{< relref "./helm.md" >}}) - -## Installing PGO Monitoring - -- [PGO Monitoring Kustomize Install]({{< relref "./monitoring/kustomize.md" >}}) diff --git a/docs/content/installation/helm.md b/docs/content/installation/helm.md deleted file mode 100644 index 0660ef6985..0000000000 --- a/docs/content/installation/helm.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: "Helm" -date: -draft: false -weight: 20 ---- - -# Installing PGO Using Helm - -This section provides instructions for installing and configuring PGO using Helm. - -## Prerequisites - -First, go to GitHub and [fork the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) -repository, which contains the PGO Helm installer. 
- -[https://github.com/CrunchyData/postgres-operator-examples/fork](https://github.com/CrunchyData/postgres-operator-examples/fork) - -Once you have forked this repo, you can download it to your working environment with a command -similar to this: - -``` -YOUR_GITHUB_UN="" -git clone --depth 1 "git@github.com:${YOUR_GITHUB_UN}/postgres-operator-examples.git" -cd postgres-operator-examples -``` - -The PGO Helm chart is located in the `helm/install` directory of this repository. - -## Configuration - -The `values.yaml` file for the Helm chart contains all of the available configuration settings for -PGO. The default `values.yaml` settings should work in most Kubernetes environments, but it may -require some customization depending on your specific environment and needs. - -For instance, it might be necessary to customize the image tags that are utilized using the -`image` setting: - -```yaml -image: - repository: registry.developers.crunchydata.com/crunchydata - tag: "ubi8-5.0.2-0" -``` - -Please note that the `values.yaml` file is located in `helm/install`. - -### Installation Mode - -When PGO is installed, it can be configured to manage PostgreSQL clusters in all namespaces within -the Kubernetes cluster, or just those within a single namespace. When managing PostgreSQL -clusters in all namespaces, a ClusterRole and ClusterRoleBinding are created to ensure PGO has -the permissions it requires to properly manage PostgreSQL clusters across all namespaces. However, -when PGO is configured to manage PostgreSQL clusters within a single namespace only, a Role and -RoleBinding are created instead. - -In order to select between these two modes when installing PGO using Helm, the `singleNamespace` -setting in the `values.yaml` file can be utilized: - -```yaml -singleNamespace: false -``` - -Specifically, if this setting is set to `false` (which is the default), then a ClusterRole and -ClusterRoleBinding will be created, and PGO will manage PostgreSQL clusters in all namespaces. -However, if this setting is set to `true`, then a Role and RoleBinding will be created instead, -allowing PGO to only manage PostgreSQL clusters in the same namespace utilized when installing -the PGO Helm chart. - -## Install - -Once you have configured the Helm chart according to your specific needs, it can then be installed -using `helm`: - -```shell -helm install <name> -n <namespace> helm/install -``` - -## Upgrade and Uninstall - -And once PGO has been installed, it can then be upgraded and uninstalled using applicable `helm` -commands: - -```shell -helm upgrade <name> -n <namespace> helm/install -``` - -```shell -helm uninstall <name> -n <namespace> -``` diff --git a/docs/content/installation/kustomize.md b/docs/content/installation/kustomize.md deleted file mode 100644 index 99ac381be1..0000000000 --- a/docs/content/installation/kustomize.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: "Kustomize" -date: -draft: false -weight: 10 ---- - -# Installing PGO Using Kustomize - -This section provides instructions for installing and configuring PGO using Kustomize. - -## Prerequisites - -First, go to GitHub and [fork the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) -repository, which contains the PGO Kustomize installer.
- -[https://github.com/CrunchyData/postgres-operator-examples/fork](https://github.com/CrunchyData/postgres-operator-examples/fork) - -Once you have forked this repo, you can download it to your working environment with a command -similar to this: - -``` -YOUR_GITHUB_UN="" -git clone --depth 1 "git@github.com:${YOUR_GITHUB_UN}/postgres-operator-examples.git" -cd postgres-operator-examples -``` - -The PGO installation project is located in the `kustomize/install` directory. - -## Configuration - -While the default Kustomize install should work in most Kubernetes environments, it may be -necessary to further customize the Kustomize project(s) according to your specific needs. - -For instance, to customize the image tags utilized for the PGO Deployment, the `images` setting -in the `kustomize/install/bases/kustomization.yaml` file can be modified: - -```yaml -images: -- name: postgres-operator - newName: registry.developers.crunchydata.com/crunchydata - newTag: ubi8-5.0.2-0 -``` - -Additionally, please note that the Kustomize install project will also create a namespace for PGO -by default (though it is possible to install without creating the namespace, as shown below). To -modify the name of namespace created by the installer, the `kustomize/install/namespace.yaml` -should be modified: - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: custom-namespace -``` - -Additionally, the `namespace` setting in `kustomize/install/bases/kustomization.yaml` should be -modified accordingly. - -```yaml -namespace: custom-namespace -``` - -Additional Kustomize overlays can then also be created to further patch and customize the -installation according to your specific needs. - -### Installation Mode - -When PGO is installed, it can be configured to manage PostgreSQL clusters in all namespaces within -the Kubernetes cluster, or just those within a single namespace. When managing PostgreSQL -clusters in all namespaces, a ClusterRole and ClusterRoleBinding is created to ensure PGO has -the permissions it requires to properly manage PostgreSQL clusters across all namespaces. However, -when PGO is configured to manage PostgreSQL clusters within a single namespace only, a Role and -RoleBinding is created instead. - -By default, the Kustomize installer will configure PGO to manage PostgreSQL clusters in all -namespaces, which means a ClusterRole and ClusterRoleBinding will also be created by default. -To instead configure PGO to manage PostgreSQL clusters in only a single namespace, simply modify -the `bases` section of the `kustomize/install/bases/kustomization.yaml` file as follows: - -```yaml -bases: -- crd -- rbac/namespace -- manager -``` - -Note that `rbac/cluster` has been changed to `rbac/namespace`. - -Add the PGO_TARGET_NAMESPACE environment variable to the env section of the `kustomize/install/bases/manager/manager.yaml` file to facilitate the ability to specify the single namespace as follows: - -```yaml - env: - - name: PGO_TARGET_NAMESPACE - valueFrom: { fieldRef: { apiVersion: v1, fieldPath: metadata.namespace } } -``` - -With these configuration changes, PGO will create a Role and RoleBinding, and will therefore only manage PostgreSQL clusters created within the namespace defined using the `namespace` setting in the -`kustomize/install/bases/kustomization.yaml` file: - -```yaml -namespace: postgres-operator -``` - -## Install - -Once the Kustomize project has been modified according to your specific needs, PGO can then -be installed using `kubectl` and Kustomize. 
To create both the target namespace for PGO and -then install PGO itself, the following command can be utilized: - -```shell -kubectl apply -k kustomize/install -``` - -However, if the namespace has already been created, the following command can be utilized to -install PGO only: - -```shell -kubectl apply -k kustomize/install/bases -``` - -## Uninstall - -Once PGO has been installed, it can also be uninstalled using `kubectl` and Kustomize. -To uninstall PGO and then also delete the namespace it had been deployed into (assuming the -namespace was previously created using the Kustomize installer as described above), the -following command can be utilized: - -```shell -kubectl delete -k kustomize/install -``` - -To uninstall PGO only (e.g. if Kustomize was not initially utilized to create the PGO namespace), -the following command can be utilized: - -```shell -kubectl delete -k kustomize/install/bases -``` diff --git a/docs/content/installation/monitoring/_index.md b/docs/content/installation/monitoring/_index.md deleted file mode 100644 index f73b4689f1..0000000000 --- a/docs/content/installation/monitoring/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "PGO Monitoring" -date: -draft: false -weight: 100 ---- - -The PGO Monitoring stack is a fully integrated solution for monitoring and visualizing metrics -captured from PostgreSQL clusters created using PGO. By leveraging [pgMonitor][] to configure -and integrate the various tools, components and metrics needed to effectively monitor PostgreSQL -clusters, PGO Monitoring provides an powerful and easy-to-use solution to effectively monitor -and visualize pertinent PostgreSQL database and container metrics. Included in the monitoring -infrastructure are the following components: - -- [pgMonitor][] - Provides the configuration needed to enable the effective capture and -visualization of PostgreSQL database metrics using the various tools comprising the PostgreSQL -Operator Monitoring infrastructure -- [Grafana](https://grafana.com/) - Enables visual dashboard capabilities for monitoring -PostgreSQL clusters, specifically using Crunchy PostgreSQL Exporter data stored within Prometheus -- [Prometheus](https://prometheus.io/) - A multi-dimensional data model with time series data, -which is used in collaboration with the Crunchy PostgreSQL Exporter to provide and store -metrics -- [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) - Handles alerts -sent by Prometheus by deduplicating, grouping, and routing them to receiver integrations. - -By leveraging the installation method described in this section, PGO Monitoring can be deployed -alongside PGO. - - - -[pgMonitor]: https://github.com/CrunchyData/pgmonitor diff --git a/docs/content/installation/monitoring/kustomize.md b/docs/content/installation/monitoring/kustomize.md deleted file mode 100644 index 89284a912d..0000000000 --- a/docs/content/installation/monitoring/kustomize.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: "Kustomize" -date: -draft: false -weight: 10 ---- - -# Installing PGO Monitoring Using Kustomize - -This section provides instructions for installing and configuring PGO Monitoring using Kustomize. - -## Prerequisites - -First, go to GitHub and [fork the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) -repository, which contains the PGO Monitoring Kustomize installer. 
- -[https://github.com/CrunchyData/postgres-operator-examples/fork](https://github.com/CrunchyData/postgres-operator-examples/fork) - -Once you have forked this repo, you can download it to your working environment with a command -similar to this: - -``` -YOUR_GITHUB_UN="" -git clone --depth 1 "git@github.com:${YOUR_GITHUB_UN}/postgres-operator-examples.git" -cd postgres-operator-examples -``` - -The PGO Monitoring project is located in the `kustomize/monitoring` directory. - -## Configuration - -While the default Kustomize install should work in most Kubernetes environments, it may be -necessary to further customize the project according to your specific needs. - -For instance, by default `fsGroup` is set to `26` for the `securityContext` defined for the -various Deployments comprising the PGO Monitoring stack: - -```yaml -securityContext: - fsGroup: 26 -``` - -In most Kubernetes environments this setting is needed to ensure processes within the container -have the permissions needed to write to any volumes mounted to each of the Pods comprising the PGO -Monitoring stack. However, when installing in an OpenShift environment (and more specifically when -using the `restricted` Security Context Constraint), the `fsGroup` setting should be removed -since OpenShift will automatically handle setting the proper `fsGroup` within the Pod's -`securityContext`. - -Additionally, within this same section it may also be necessary to modify the `supplmentalGroups` -setting according to your specific storage configuration: - -```yaml -securityContext: - supplementalGroups : 65534 -``` - -Therefore, the following files (located under `kustomize/monitoring`) should be modified and/or -patched (e.g. using additional overlays) as needed to ensure the `securityContext` is properly -defined for your Kubernetes environment: - -- `deploy-alertmanager.yaml` -- `deploy-grafana.yaml` -- `deploy-prometheus.yaml` - -And to modify the configuration for the various storage resources (i.e. PersistentVolumeClaims) -created by the PGO Monitoring installer, the `kustomize/monitoring/pvcs.yaml` file can also -be modified. - -Additionally, it is also possible to further customize the configuration for the various components -comprising the PGO Monitoring stack (Grafana, Prometheus and/or AlertManager) by modifying the -following configuration resources: - -- `alertmanager-config.yaml` -- `alertmanager-rules-config.yaml` -- `grafana-datasources.yaml` -- `prometheus-config.yaml` - -Finally, please note that the default username and password for Grafana can be updated by -modifying the Grafana Secret in file `kustomize/monitoring/grafana-secret.yaml`. 
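As a point of reference, an updated Grafana admin Secret might look like the following sketch. The Secret name and key names here are assumptions (inferred from the file name); use whatever name and keys the Grafana Deployment in `deploy-grafana.yaml` actually references.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: grafana-secret        # assumed name, taken from the file name
type: Opaque
stringData:
  username: admin             # assumed key names; match deploy-grafana.yaml
  password: a-strong-password
```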
- -## Install - -Once the Kustomize project has been modified according to your specific needs, PGO Monitoring can -then be installed using `kubectl` and Kustomize: - -```shell -kubectl apply -k kustomize/monitoring -``` - -## Uninstall - -And similarly, once PGO Monitoring has been installed, it can uninstalled using `kubectl` and -Kustomize: - -```shell -kubectl delete -k kustomize/monitoring -``` diff --git a/docs/content/installation/upgrade.md b/docs/content/installation/upgrade.md deleted file mode 100644 index ce0e23d812..0000000000 --- a/docs/content/installation/upgrade.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "Upgrade" -date: -draft: false -weight: 50 ---- - -# Overview - -Upgrading to a new version of PGO is typically as simple as following the various installation -guides defined within the PGO documentation: - -- [PGO Kustomize Install]({{< relref "./kustomize.md" >}}) -- [PGO Helm Install]({{< relref "./helm.md" >}}) - -However, when upgrading to or from certain versions of PGO, extra steps may be required in order -to ensure a clean and successful upgrade. This page will therefore document any additional -steps that must be completed when upgrading PGO. - -## Upgrading from PGO 5.0.0 Using Kustomize - -Starting with PGO 5.0.1, both the Deployment and ServiceAccount created when installing PGO via -the installers in the -[Postgres Operator examples repository](https://github.com/CrunchyData/postgres-operator-examples) -have been renamed from `postgres-operator` to `pgo`. As a result of this change, if using -Kustomize to install PGO and upgrading from PGO 5.0.0, the following step must be completed prior -to upgrading. This will ensure multiple versions of PGO are not installed and running concurrently -within your Kubernetes environment. - -Prior to upgrading PGO, first manually delete the PGO 5.0.0 `postgres-operator` Deployment and -ServiceAccount: - -```bash -kubectl -n postgres-operator delete deployment,serviceaccount postgres-operator -``` - -Then, once both the Deployment and ServiceAccount have been deleted, proceed with upgrading PGO -by applying the new version of the Kustomize installer: - -```bash -kubectl apply -k kustomize/install/bases -``` diff --git a/docs/content/quickstart/_index.md b/docs/content/quickstart/_index.md deleted file mode 100644 index 729c0bbfe6..0000000000 --- a/docs/content/quickstart/_index.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: "Quickstart" -date: -draft: false -weight: 10 ---- - -Can't wait to try out the [PGO](https://github.com/CrunchyData/postgres-operator), the [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com)? Let us show you the quickest possible path to getting up and running. 
- -## Prerequisites - -Please be sure you have the following utilities installed on your host machine: - -- `kubectl` -- `git` - -## Installation - -### Step 1: Download the Examples - -First, go to GitHub and [fork the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository: - -[https://github.com/CrunchyData/postgres-operator-examples/fork](https://github.com/CrunchyData/postgres-operator-examples/fork) - -Once you have forked this repo, you can download it to your working environment with a command similar to this: - -``` -YOUR_GITHUB_UN="" -git clone --depth 1 "git@github.com:${YOUR_GITHUB_UN}/postgres-operator-examples.git" -cd postgres-operator-examples -``` -### Step 2: Install PGO, the Postgres Operator - -You can install PGO, the Postgres Operator from Crunchy Data, using the command below: - -``` -kubectl apply -k kustomize/install -``` - -This will create a namespace called `postgres-operator` and create all of the objects required to deploy PGO. - -To check on the status of your installation, you can run the following command: - -``` -kubectl -n postgres-operator get pods \ - --selector=postgres-operator.crunchydata.com/control-plane=postgres-operator \ - --field-selector=status.phase=Running -``` - -If the PGO Pod is healthy, you should see output similar to: - -``` -NAME READY STATUS RESTARTS AGE -postgres-operator-9dd545d64-t4h8d 1/1 Running 0 3s -``` - -## Create a Postgres Cluster - -Let's create a simple Postgres cluster. You can do this by executing the following command: - -``` -kubectl apply -k kustomize/postgres -``` - -This will create a Postgres cluster named `hippo` in the `postgres-operator` namespace. You can track the progress of your cluster using the following command: - -``` -kubectl -n postgres-operator describe postgresclusters.postgres-operator.crunchydata.com hippo -``` - -## Connect to the Postgres cluster - -As part of creating a Postgres cluster, the Postgres Operator creates a PostgreSQL user account. The credentials for this account are stored in a Secret that has the name `<clusterName>-pguser-<userName>`. - -Within this Secret are attributes that provide information to let you log into the PostgreSQL cluster. These include: - -- `user`: The name of the user account. -- `password`: The password for the user account. -- `dbname`: The name of the database that the user has access to by default. -- `host`: The name of the host of the database. This references the [Service](https://kubernetes.io/docs/concepts/services-networking/service/) of the primary Postgres instance. -- `port`: The port that the database is listening on. -- `uri`: A [PostgreSQL connection URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) that provides all the information for logging into the Postgres database. - -If you deploy your Postgres cluster with the [PgBouncer](https://www.pgbouncer.org/) connection pooler, there are additional values that are populated in the user Secret, including: - -- `pgbouncer-host`: The name of the host of the PgBouncer connection pooler. This references the [Service](https://kubernetes.io/docs/concepts/services-networking/service/) of the PgBouncer connection pooler. -- `pgbouncer-port`: The port that the PgBouncer connection pooler is listening on. -- `pgbouncer-uri`: A [PostgreSQL connection URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) that provides all the information for logging into the Postgres database via the PgBouncer connection pooler.
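As a quick sketch of how these attributes can be read (not part of the original quickstart), the commands below assume the `hippo` cluster created above, so the Secret is named `hippo-pguser-hippo`:

```
# Decode the direct connection URI stored in the user Secret
kubectl -n postgres-operator get secrets hippo-pguser-hippo \
  -o go-template='{{.data.uri | base64decode}}'

# When PgBouncer is deployed, the pooled connection URI is stored the same way
kubectl -n postgres-operator get secrets hippo-pguser-hippo \
  -o go-template='{{index .data "pgbouncer-uri" | base64decode}}'
```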
- -Note that **all connections use TLS**. PGO sets up a PKI for your Postgres clusters. You can also choose to bring your own PKI / certificate authority; this is covered later in the documentation. - -### Connect via `psql` in the Terminal - -#### Connect Directly - -If you are on the same network as your PostgreSQL cluster, you can connect directly to it using the following command: - -``` -psql $(kubectl -n postgres-operator get secrets hippo-pguser-hippo -o go-template='{{.data.uri | base64decode}}') -``` - -#### Connect Using a Port-Forward - -In a new terminal, create a port forward: - -``` -PG_CLUSTER_PRIMARY_POD=$(kubectl get pod -n postgres-operator -o name \ - -l postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/role=master) -kubectl -n postgres-operator port-forward "${PG_CLUSTER_PRIMARY_POD}" 5432:5432 -``` - -Establish a connection to the PostgreSQL cluster. - -``` -PG_CLUSTER_USER_SECRET_NAME=hippo-pguser-hippo - -PGPASSWORD=$(kubectl get secrets -n postgres-operator "${PG_CLUSTER_USER_SECRET_NAME}" -o go-template='{{.data.password | base64decode}}') \ -PGUSER=$(kubectl get secrets -n postgres-operator "${PG_CLUSTER_USER_SECRET_NAME}" -o go-template='{{.data.user | base64decode}}') \ -PGDATABASE=$(kubectl get secrets -n postgres-operator "${PG_CLUSTER_USER_SECRET_NAME}" -o go-template='{{.data.dbname | base64decode}}') \ -psql -h localhost -``` - -### Connect an Application - -The information provided in the user Secret will allow you to connect an application directly to your PostgreSQL database. - -For example, let's connect [Keycloak](https://www.keycloak.org/). Keycloak is a popular open source identity management tool that is backed by a PostgreSQL database. Using the `hippo` cluster we created, we can deploy the following manifest file: - -``` -cat <<EOF >> keycloak.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: keycloak - namespace: postgres-operator - labels: - app.kubernetes.io/name: keycloak -spec: - selector: - matchLabels: - app.kubernetes.io/name: keycloak - template: - metadata: - labels: - app.kubernetes.io/name: keycloak - spec: - containers: - - image: quay.io/keycloak/keycloak:latest - name: keycloak - env: - - name: DB_VENDOR - value: "postgres" - - name: DB_ADDR - valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: host } } - - name: DB_PORT - valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: port } } - - name: DB_DATABASE - valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: dbname } } - - name: DB_USER - valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: user } } - - name: DB_PASSWORD - valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: password } } - - name: KEYCLOAK_USER - value: "admin" - - name: KEYCLOAK_PASSWORD - value: "admin" - - name: PROXY_ADDRESS_FORWARDING - value: "true" - ports: - - name: http - containerPort: 8080 - - name: https - containerPort: 8443 - readinessProbe: - httpGet: - path: /auth/realms/master - port: 8080 - restartPolicy: Always - -EOF - -kubectl apply -f keycloak.yaml -``` - -There is a full example of how to deploy Keycloak with the Postgres Operator in the `kustomize/keycloak` folder. - -## Next Steps - -Congratulations, you've got your Postgres cluster up and running, perhaps with an application connected to it!
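As a quick sanity check (illustrative only, not from the original guide), you can wait for the Keycloak Deployment created above to become available and skim its logs for the database connection:

```
kubectl -n postgres-operator rollout status deployment/keycloak --timeout=120s
kubectl -n postgres-operator logs deployment/keycloak --tail=20
```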
👏 👏 👏 - -You can find out more about the [`postgresclusters` custom resource definition]({{< relref "references/crd.md" >}}) through the [documentation]({{< relref "references/crd.md" >}}) and through `kubectl explain`, i.e.: - -``` -kubectl explain postgresclusters -``` - -Let's work through a tutorial together to better understand the various components of PGO, the Postgres Operator, and how you can fine-tune your settings to tailor your Postgres cluster to your application. diff --git a/docs/content/references/_index.md b/docs/content/references/_index.md deleted file mode 100644 index f5b4f37f0b..0000000000 --- a/docs/content/references/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "References" -date: -draft: false -weight: 100 ---- diff --git a/docs/content/references/components.md b/docs/content/references/components.md deleted file mode 100644 index dc266700dd..0000000000 --- a/docs/content/references/components.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: "Components and Compatibility" -date: -draft: false -weight: 110 ---- - -## Kubernetes Compatibility - -PGO, the Postgres Operator from Crunchy Data, is tested on the following platforms: - -- Kubernetes 1.18+ -- OpenShift 4.5+ -- Google Kubernetes Engine (GKE), including Anthos -- Amazon EKS -- Microsoft AKS -- VMware Tanzu - -## Components Compatibility - -The following table defines the compatibility between PGO and the various component containers -needed to deploy PostgreSQL clusters using PGO. - -The listed versions of Postgres show the latest minor release (e.g. 13.4) of each major version (e.g. 13). Older minor releases may still be compatible with PGO. We generally recommend running the latest minor release for the [same reasons that the PostgreSQL community provides](https://www.postgresql.org/support/versioning/). - -| Component | Version | PGO Version Min. | PGO Version Max. | -|-----------|---------|------------------|------------------| -| `crunchy-pgbackrest` | 2.33 | 5.0.0 | 5.0.2 | -| `crunchy-pgbouncer` | 1.15 | 5.0.0 | 5.0.2 | -| `crunchy-postgres-ha` | 13.4 | 5.0.0 | 5.0.2 | -| `crunchy-postgres-ha` | 12.8 | 5.0.0 | 5.0.2 | -| `crunchy-postgres-ha` | 11.13 | 5.0.0 | 5.0.2 | -| `crunchy-postgres-ha` | 10.18 | 5.0.0 | 5.0.2 | -| `crunchy-postgres-gis-ha` | 13.4-3.1 | 5.0.0 | 5.0.2 | -| `crunchy-postgres-gis-ha` | 13.4-3.0 | 5.0.0 | 5.0.2 | -| `crunchy-postgres-gis-ha` | 12.8-3.0 | 5.0.0 | 5.0.2 | -| `crunchy-postgres-gis-ha` | 12.8-2.5 | 5.0.0 | 5.0.2 | -| `crunchy-postgres-gis-ha` | 11.13-2.5 | 5.0.0 | 5.0.2 | -| `crunchy-postgres-gis-ha` | 11.13-2.4 | 5.0.0 | 5.0.2 | -| `crunchy-postgres-gis-ha` | 10.18-2.4 | 5.0.0 | 5.0.2 | -| `crunchy-postgres-gis-ha` | 10.18-2.3 | 5.0.0 | 5.0.2 | - -The Crunchy Postgres components include Patroni 2.1.0. - -## Extensions Compatibility - -The following table defines the compatibility between Postgres extensions and the versions of Postgres in which they are available. The "Postgres version" corresponds to the major version of a Postgres container. - -The table also lists the initial PGO version in which that version of the extension became available.
- -| Extension | Version | Postgres Versions | Initial PGO Version | -|-----------|---------|-------------------|---------------------| -| `pgAudit` | 1.5.0 | 13 | 5.0.0 | -| `pgAudit` | 1.4.1 | 12 | 5.0.0 | -| `pgAudit` | 1.3.2 | 11 | 5.0.0 | -| `pgAudit` | 1.2.2 | 10 | 5.0.0 | -| `pgAudit Analyze` | 1.0.7 | 13, 12, 11, 10 | 5.0.0 | -| `pg_cron` | 1.3.1 | 13, 12, 11, 10 | 5.0.0 | -| `pg_partman` | 4.5.1 | 13, 12, 11, 10 | 5.0.0 | -| `pgnodemx` | 1.0.4 | 13, 12, 11, 10 | 5.0.0 | -| `set_user` | 2.0.1 | 13, 12, 11, 10 | 5.0.2 | -| `set_user` | 2.0.0 | 13, 12, 11, 10 | 5.0.0 | -| `TimescaleDB` | 2.4.0 | 13, 12 | 5.0.2 | -| `TimescaleDB` | 2.3.1 | 11 | 5.0.1 | -| `TimescaleDB` | 2.2.0 | 13, 12, 11 | 5.0.0 | -| `wal2json` | 2.3 | 13, 12, 11, 10 | 5.0.0 | - -### Geospatial Extensions - -The following extensions are available in the geospatially aware containers (`crunchy-postgres-gis-ha`): - -| Extension | Version | Postgres Versions | Initial PGO Version | -|-----------|---------|-------------------|---------------------| -| `PostGIS` | 3.1 | 13 | 5.0.0 | -| `PostGIS` | 3.0 | 13, 12 | 5.0.0 | -| `PostGIS` | 2.5 | 12, 11 | 5.0.0 | -| `PostGIS` | 2.4 | 11, 10 | 5.0.0 | -| `PostGIS` | 2.3 | 10 | 5.0.0 | -| `pgrouting` | 3.1.3 | 13 | 5.0.0 | -| `pgrouting` | 3.0.5 | 13, 12 | 5.0.0 | -| `pgrouting` | 2.6.3 | 12, 11, 10 | 5.0.0 | diff --git a/docs/content/references/crd.md b/docs/content/references/crd.md deleted file mode 100644 index 19e7ef0c49..0000000000 --- a/docs/content/references/crd.md +++ /dev/null @@ -1,8862 +0,0 @@ ---- -title: CRD Reference -draft: false -weight: 100 ---- - -Packages: - -- [postgres-operator.crunchydata.com/v1beta1](#postgres-operatorcrunchydatacomv1beta1) - -

postgres-operator.crunchydata.com/v1beta1

- -Resource Types: - -- [PostgresCluster](#postgrescluster) - - - - -

PostgresCluster

PostgresCluster is the Schema for the postgresclusters API

| Name | Type | Description | Required |
|------|------|-------------|----------|
| apiVersion | string | postgres-operator.crunchydata.com/v1beta1 | true |
| kind | string | PostgresCluster | true |
| metadata | object | Refer to the Kubernetes API documentation for the fields of the `metadata` field. | true |
| spec | object | PostgresClusterSpec defines the desired state of PostgresCluster | false |
| status | object | PostgresClusterStatus defines the observed state of PostgresCluster | false |
- - -

- PostgresCluster.spec - ↩ Parent -

PostgresClusterSpec defines the desired state of PostgresCluster

| Name | Type | Description | Required |
|------|------|-------------|----------|
| customReplicationTLSSecret | object | The secret containing the replication client certificates and keys for secure connections to the PostgreSQL server. It will need to contain the client TLS certificate, TLS key and the Certificate Authority certificate with the data keys set to tls.crt, tls.key and ca.crt, respectively. NOTE: If CustomReplicationClientTLSSecret is provided, CustomTLSSecret MUST be provided and the ca.crt provided must be the same. | false |
| customTLSSecret | object | The secret containing the Certificates and Keys to encrypt PostgreSQL traffic will need to contain the server TLS certificate, TLS key and the Certificate Authority certificate with the data keys set to tls.crt, tls.key and ca.crt, respectively. It will then be mounted as a volume projection to the '/pgconf/tls' directory. For more information on Kubernetes secret projections, please see https://k8s.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths NOTE: If CustomTLSSecret is provided, CustomReplicationClientTLSSecret MUST be provided and the ca.crt provided must be the same. | false |
| dataSource | object | Specifies a data source for bootstrapping the PostgreSQL cluster. | false |
| image | string | The image name to use for PostgreSQL containers. When omitted, the value comes from an operator environment variable. For standard PostgreSQL images, the format is RELATED_IMAGE_POSTGRES_{postgresVersion}, e.g. RELATED_IMAGE_POSTGRES_13. For PostGIS enabled PostgreSQL images, the format is RELATED_IMAGE_POSTGRES_{postgresVersion}_GIS_{postGISVersion}, e.g. RELATED_IMAGE_POSTGRES_13_GIS_3.1. | false |
| imagePullSecrets | []object | The image pull secrets used to pull from a private registry. Changing this value causes all running pods to restart. https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | false |
| metadata | object | Metadata contains metadata for PostgresCluster resources | false |
| monitoring | object | The specification of monitoring tools that connect to PostgreSQL | false |
| openshift | boolean | Whether or not the PostgreSQL cluster is being deployed to an OpenShift environment. If the field is unset, the operator will automatically detect the environment. | false |
| patroni | object | | false |
| port | integer | The port on which PostgreSQL should listen. | false |
| postGISVersion | string | The PostGIS extension version installed in the PostgreSQL image. When image is not set, indicates a PostGIS enabled image will be used. | false |
| proxy | object | The specification of a proxy that connects to PostgreSQL. | false |
| service | object | Specification of the service that exposes the PostgreSQL primary instance. | false |
| shutdown | boolean | Whether or not the PostgreSQL cluster should be stopped. When this is true, workloads are scaled to zero and CronJobs are suspended. Other resources, such as Services and Volumes, remain in place. | false |
| standby | object | Run this cluster as a read-only copy of an existing cluster or archive. | false |
| supplementalGroups | []integer | A list of group IDs applied to the process of a container. These can be useful when accessing shared file systems with constrained permissions. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | false |
| users | []object | Users to create inside PostgreSQL and the databases they should access. The default creates one user that can access one database matching the PostgresCluster name. An empty list creates no users. Removing a user from this list does NOT drop the user nor revoke their access. | false |
| backups | object | PostgreSQL backup configuration | true |
| instances | []object | | true |
| postgresVersion | integer | The major version of PostgreSQL installed in the PostgreSQL image | true |
- - -
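To make the required fields concrete, here is a minimal sketch of a PostgresCluster manifest. It follows the layout used by the upstream postgres-operator-examples Kustomize project; the instance name, storage sizes, and `repo1` repository name are illustrative, and the full set of options is described in the tables that follow.

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo
spec:
  postgresVersion: 13          # required: major Postgres version in the image
  instances:                   # required: at least one instance set
  - name: instance1            # illustrative name
    dataVolumeClaimSpec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi
  backups:                     # required: pgBackRest backup configuration
    pgbackrest:
      repos:
      - name: repo1            # illustrative repository name
        volume:
          volumeClaimSpec:
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: 1Gi
```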

- PostgresCluster.spec.customReplicationTLSSecret - ↩ Parent -

The secret containing the replication client certificates and keys for secure connections to the PostgreSQL server. It will need to contain the client TLS certificate, TLS key and the Certificate Authority certificate with the data keys set to tls.crt, tls.key and ca.crt, respectively. NOTE: If CustomReplicationClientTLSSecret is provided, CustomTLSSecret MUST be provided and the ca.crt provided must be the same.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| items | []object | If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? | false |
| optional | boolean | Specify whether the Secret or its key must be defined | false |
- - -

- PostgresCluster.spec.customReplicationTLSSecret.items[index] - ↩ Parent -

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| mode | integer | Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| key | string | The key to project. | true |
| path | string | The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
- - -

- PostgresCluster.spec.customTLSSecret - ↩ Parent -

The secret containing the Certificates and Keys to encrypt PostgreSQL traffic will need to contain the server TLS certificate, TLS key and the Certificate Authority certificate with the data keys set to tls.crt, tls.key and ca.crt, respectively. It will then be mounted as a volume projection to the '/pgconf/tls' directory. For more information on Kubernetes secret projections, please see https://k8s.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths NOTE: If CustomTLSSecret is provided, CustomReplicationClientTLSSecret MUST be provided and the ca.crt provided must be the same.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| items | []object | If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? | false |
| optional | boolean | Specify whether the Secret or its key must be defined | false |
- - -

- PostgresCluster.spec.customTLSSecret.items[index] - ↩ Parent -

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| mode | integer | Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| key | string | The key to project. | true |
| path | string | The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
- - -
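For illustration (a sketch, not from the reference itself), custom TLS material is referenced by Secret name in the cluster spec; the Secret names below are hypothetical, and both Secrets must share the same ca.crt as noted above:

```yaml
spec:
  customTLSSecret:
    name: hippo.tls                 # hypothetical Secret containing tls.crt, tls.key, ca.crt
  customReplicationTLSSecret:
    name: hippo-replication.tls     # hypothetical Secret; its ca.crt must match customTLSSecret
```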

- PostgresCluster.spec.dataSource - ↩ Parent -

Specifies a data source for bootstrapping the PostgreSQL cluster.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| postgresCluster | object | Defines a pgBackRest data source that can be used to pre-populate the PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. | false |
- - -

- PostgresCluster.spec.dataSource.postgresCluster - ↩ Parent -

Defines a pgBackRest data source that can be used to pre-populate the PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| affinity | object | Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| clusterName | string | The name of an existing PostgresCluster to use as the data source for the new PostgresCluster. Defaults to the name of the PostgresCluster being created if not provided. | false |
| clusterNamespace | string | The namespace of the cluster specified as the data source using the clusterName field. Defaults to the namespace of the PostgresCluster being created if not provided. | false |
| options | []string | Command line options to include when running the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore | false |
| resources | object | Resource requirements for the pgBackRest restore Job. | false |
| tolerations | []object | Tolerations of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |
| repoName | string | The name of the pgBackRest repo within the source PostgresCluster that contains the backups that should be utilized to perform a pgBackRest restore when initializing the data source for the new PostgresCluster. | true |
- - -
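As a sketch of how these fields fit together, a new cluster could be bootstrapped from an existing cluster's pgBackRest repository like this (the cluster and repository names are illustrative):

```yaml
spec:
  dataSource:
    postgresCluster:
      clusterName: hippo    # existing source PostgresCluster (defaults to the new cluster's name)
      repoName: repo1       # required: pgBackRest repo in the source cluster to restore from
```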

- PostgresCluster.spec.dataSource.postgresCluster.affinity - ↩ Parent -

- - - -Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
nodeAffinityobjectDescribes node affinity scheduling rules for the pod.false
podAffinityobjectDescribes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).false
podAntiAffinityobjectDescribes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).false
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity - ↩ Parent -

- - - -Describes node affinity scheduling rules for the pod. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecutionobjectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.false
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
preferenceobjectA node selector term, associated with the corresponding weight.true
weightintegerWeight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference - ↩ Parent -

- - - -A node selector term, associated with the corresponding weight. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] - ↩ Parent -

- - - -A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] - ↩ Parent -

- - - -A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution - ↩ Parent -

- - - -If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
nodeSelectorTerms[]objectRequired. A list of node selector terms. The terms are ORed.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] - ↩ Parent -

- - - -A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] - ↩ Parent -

- - - -A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] - ↩ Parent -

- - - -A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity - ↩ Parent -

- - - -Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm - ↩ Parent -

- - - -Required. A pod affinity term, associated with the corresponding weight. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaces[]stringnamespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"false
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector - ↩ Parent -

- - - -A label query over a set of resources, in this case pods. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaces[]stringnamespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"false
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector - ↩ Parent -

- - - -A label query over a set of resources, in this case pods. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity - ↩ Parent -

- - - -Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm - ↩ Parent -

- - - -Required. A pod affinity term, associated with the corresponding weight. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaces[]stringnamespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"false
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector - ↩ Parent -

- - - -A label query over a set of resources, in this case pods. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaces[]stringnamespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"false
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector - ↩ Parent -

- - - -A label query over a set of resources, in this case pods. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
- - -

- PostgresCluster.spec.dataSource.postgresCluster.resources - ↩ Parent -

- - - -Resource requirements for the pgBackRest restore Job. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/false
- - -

- PostgresCluster.spec.dataSource.postgresCluster.tolerations[index] - ↩ Parent -

- - - -The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
effectstringEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.false
keystringKey is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.false
operatorstringOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.false
tolerationSecondsintegerTolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.false
valuestringValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.false
- - -

- PostgresCluster.spec.imagePullSecrets[index] - ↩ Parent -

- - - -LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
- - -

- PostgresCluster.spec.metadata - ↩ Parent -

- - - -Metadata contains metadata for PostgresCluster resources - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
annotationsmap[string]stringfalse
labelsmap[string]stringfalse
- - -

- PostgresCluster.spec.monitoring - ↩ Parent -

- - - -The specification of monitoring tools that connect to PostgreSQL - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
pgmonitorobjectPGMonitorSpec defines the desired state of the pgMonitor tool suitefalse
- - -

- PostgresCluster.spec.monitoring.pgmonitor - ↩ Parent -

- - - -PGMonitorSpec defines the desired state of the pgMonitor tool suite - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
exporterobjectfalse
- - -

- PostgresCluster.spec.monitoring.pgmonitor.exporter - ↩ Parent -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
configuration[]objectProjected volumes containing custom PostgreSQL Exporter configuration. Currently supports the customization of PostgreSQL Exporter queries. If a "queries.yaml" file is detected in any volume projected using this field, it will be loaded using the "extend.query-path" flag: https://github.com/prometheus-community/postgres_exporter#flags Changing the values of field causes PostgreSQL and the exporter to restart.false
imagestringThe image name to use for crunchy-postgres-exporter containers. The image may also be set using the RELATED_IMAGE_PGEXPORTER environment variable.false
resourcesobjectChanging this value causes PostgreSQL and the exporter to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containersfalse
- - -

- PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index] - ↩ Parent -

- - - -Projection that may be projected along with other supported volume types - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
configMapobjectinformation about the configMap data to projectfalse
downwardAPIobjectinformation about the downwardAPI data to projectfalse
secretobjectinformation about the secret data to projectfalse
serviceAccountTokenobjectinformation about the serviceAccountToken data to projectfalse
- - -

- PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].configMap - ↩ Parent -

- - - -information about the configMap data to project - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
items[]objectIf unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?false
optionalbooleanSpecify whether the ConfigMap or its keys must be definedfalse
- - -

- PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].configMap.items[index] - ↩ Parent -

- - - -Maps a string key to a path within a volume. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
modeintegerOptional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
keystringThe key to project.true
pathstringThe relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
- - -

- PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].downwardAPI - ↩ Parent -

- - - -information about the downwardAPI data to project - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
items[]objectItems is a list of DownwardAPIVolume filefalse
- - -

- PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].downwardAPI.items[index] - ↩ Parent -

- - - -DownwardAPIVolumeFile represents information to create the file containing the pod field - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
fieldRefobjectRequired: Selects a field of the pod: only annotations, labels, name and namespace are supported.false
modeintegerOptional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
resourceFieldRefobjectSelects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.false
pathstringRequired: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'true
- - -

### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].downwardAPI.items[index].fieldRef

Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| apiVersion | string | Version of the schema the FieldPath is written in terms of, defaults to "v1". | false |
| fieldPath | string | Path of the field to select in the specified API version. | true |

### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].downwardAPI.items[index].resourceFieldRef

Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| containerName | string | Container name: required for volumes, optional for env vars | false |
| divisor | int or string | Specifies the output format of the exposed resources, defaults to "1" | false |
| resource | string | Required: resource to select | true |

### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].secret

information about the secret data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the Secret or its key must be defined | false |

### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].secret.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| mode | integer | Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| key | string | The key to project. | true |
| path | string | The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |

### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].serviceAccountToken

information about the serviceAccountToken data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| audience | string | Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | false |
| expirationSeconds | integer | ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes. | false |
| path | string | Path is the path relative to the mount point of the file to project the token into. | true |

### PostgresCluster.spec.monitoring.pgmonitor.exporter.resources

Changing this value causes PostgreSQL and the exporter to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ | false |

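As a point of reference, the fields above map onto the manifest as sketched below; the CPU and memory figures are illustrative only, not recommended values.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec).
monitoring:
  pgmonitor:
    exporter:
      resources:
        requests:        # minimum resources reserved for the exporter container
          cpu: 100m
          memory: 128Mi
        limits:          # maximum resources the exporter container may use
          cpu: 500m
          memory: 256Mi
```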
### PostgresCluster.spec.patroni

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| dynamicConfiguration | object |  | false |
| leaderLeaseDurationSeconds | integer | TTL of the cluster leader lock. "Think of it as the length of time before initiation of the automatic failover process." | false |
| port | integer | The port on which Patroni should listen. | false |
| syncPeriodSeconds | integer | The interval for refreshing the leader lock and applying dynamicConfiguration. Must be less than leaderLeaseDurationSeconds. | false |

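A minimal sketch of these Patroni settings as they might appear in a manifest. The API types `dynamicConfiguration` only as an object, so the `postgresql.parameters` nesting shown here is an assumption for illustration; the numeric values are likewise illustrative.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec).
patroni:
  leaderLeaseDurationSeconds: 30   # TTL of the cluster leader lock
  syncPeriodSeconds: 10            # must be less than leaderLeaseDurationSeconds
  port: 8008                       # port Patroni listens on (illustrative)
  dynamicConfiguration:            # free-form object handed to Patroni; structure assumed here
    postgresql:
      parameters:
        max_connections: 200
```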
### PostgresCluster.spec.proxy

The specification of a proxy that connects to PostgreSQL.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| pgBouncer | object | Defines a PgBouncer proxy and connection pooler. | true |

### PostgresCluster.spec.proxy.pgBouncer

Defines a PgBouncer proxy and connection pooler.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| affinity | object | Scheduling constraints of a PgBouncer pod. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| config | object | Configuration settings for the PgBouncer process. Changes to any of these values will be automatically reloaded without validation. Be careful, as you may put PgBouncer into an unusable state. More info: https://www.pgbouncer.org/usage.html#reload | false |
| customTLSSecret | object | A secret projection containing a certificate and key with which to encrypt connections to PgBouncer. The "tls.crt", "tls.key", and "ca.crt" paths must be PEM-encoded certificates and keys. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths | false |
| image | string | Name of a container image that can run PgBouncer 1.15 or newer. Changing this value causes PgBouncer to restart. The image may also be set using the RELATED_IMAGE_PGBOUNCER environment variable. More info: https://kubernetes.io/docs/concepts/containers/images | false |
| metadata | object | Metadata contains metadata for PostgresCluster resources | false |
| port | integer | Port on which PgBouncer should listen for client connections. Changing this value causes PgBouncer to restart. | false |
| replicas | integer | Number of desired PgBouncer pods. | false |
| resources | object | Compute resources of a PgBouncer container. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers | false |
| service | object | Specification of the service that exposes PgBouncer. | false |
| tolerations | []object | Tolerations of a PgBouncer pod. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |

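Putting a few of these fields together, a hedged sketch of a PgBouncer proxy definition; the replica count, port, resource figures, and pool setting are illustrative values, not defaults.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec).
proxy:
  pgBouncer:
    replicas: 2                  # number of desired PgBouncer pods
    port: 5432                   # listening port; changing it restarts PgBouncer
    resources:
      requests:
        cpu: 100m
        memory: 128Mi
    config:
      global:
        pool_mode: transaction   # reloaded without validation; see the config section below
```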
### PostgresCluster.spec.proxy.pgBouncer.affinity

Scheduling constraints of a PgBouncer pod. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| nodeAffinity | object | Describes node affinity scheduling rules for the pod. | false |
| podAffinity | object | Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). | false |
| podAntiAffinity | object | Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). | false |

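For example, a preferred pod anti-affinity rule that spreads PgBouncer pods across nodes might look like the sketch below; the label used in `matchLabels` is an assumption for illustration, not a documented selector.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec.proxy.pgBouncer).
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname   # spread across nodes
          labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/role: pgbouncer   # hypothetical label for illustration
```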
### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index]

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity

Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaces | []string | namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" | false |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `topologyKey` matches that of any node on which a pod of the set of pods is running

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaces | []string | namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" | false |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity

Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaces | []string | namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" | false |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `topologyKey` matches that of any node on which a pod of the set of pods is running

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaces | []string | namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" | false |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

### PostgresCluster.spec.proxy.pgBouncer.config

Configuration settings for the PgBouncer process. Changes to any of these values will be automatically reloaded without validation. Be careful, as you may put PgBouncer into an unusable state. More info: https://www.pgbouncer.org/usage.html#reload

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| databases | map[string]string | PgBouncer database definitions. The key is the database requested by a client while the value is a libpq-styled connection string. The special key "*" acts as a fallback. When this field is empty, PgBouncer is configured with a single "*" entry that connects to the primary PostgreSQL instance. More info: https://www.pgbouncer.org/config.html#section-databases | false |
| files | []object | Files to mount under "/etc/pgbouncer". When specified, settings in the "pgbouncer.ini" file are loaded before all others. From there, other files may be included by absolute path. Changing these references causes PgBouncer to restart, but changes to the file contents are automatically reloaded. More info: https://www.pgbouncer.org/config.html#include-directive | false |
| global | map[string]string | Settings that apply to the entire PgBouncer process. More info: https://www.pgbouncer.org/config.html | false |
| users | map[string]string | Connection settings specific to particular users. More info: https://www.pgbouncer.org/config.html#section-users | false |

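A short sketch of these sections; the host name, pool settings, and user entry are placeholders rather than recommended values.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec.proxy.pgBouncer).
config:
  global:
    pool_mode: session                   # applies to the whole PgBouncer process
    max_client_conn: "200"               # map values are strings
  databases:
    "*": host=hippo-primary port=5432    # fallback entry; connection string is illustrative
  users:
    app: pool_mode=transaction           # per-user override for a hypothetical "app" user
```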
### PostgresCluster.spec.proxy.pgBouncer.config.files[index]

Projection that may be projected along with other supported volume types

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| configMap | object | information about the configMap data to project | false |
| downwardAPI | object | information about the downwardAPI data to project | false |
| secret | object | information about the secret data to project | false |
| serviceAccountToken | object | information about the serviceAccountToken data to project | false |

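For instance, projecting a custom `pgbouncer.ini` from a Secret and an extra file from a ConfigMap might look like the sketch below; both object names are hypothetical.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec.proxy.pgBouncer.config).
files:
  - secret:
      name: pgbouncer-extra-config   # hypothetical Secret holding pgbouncer.ini
      items:
        - key: pgbouncer.ini
          path: pgbouncer.ini        # mounted under /etc/pgbouncer
  - configMap:
      name: pgbouncer-userlist       # hypothetical ConfigMap with an included file
```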
### PostgresCluster.spec.proxy.pgBouncer.config.files[index].configMap

information about the configMap data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the ConfigMap or its keys must be defined | false |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].configMap.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| mode | integer | Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| key | string | The key to project. | true |
| path | string | The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].downwardAPI

information about the downwardAPI data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | Items is a list of DownwardAPIVolume file | false |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].downwardAPI.items[index]

DownwardAPIVolumeFile represents information to create the file containing the pod field

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| fieldRef | object | Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. | false |
| mode | integer | Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| resourceFieldRef | object | Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. | false |
| path | string | Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' | true |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].downwardAPI.items[index].fieldRef

Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| apiVersion | string | Version of the schema the FieldPath is written in terms of, defaults to "v1". | false |
| fieldPath | string | Path of the field to select in the specified API version. | true |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].downwardAPI.items[index].resourceFieldRef

Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| containerName | string | Container name: required for volumes, optional for env vars | false |
| divisor | int or string | Specifies the output format of the exposed resources, defaults to "1" | false |
| resource | string | Required: resource to select | true |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].secret

information about the secret data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the Secret or its key must be defined | false |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].secret.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| mode | integer | Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| key | string | The key to project. | true |
| path | string | The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].serviceAccountToken

information about the serviceAccountToken data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| audience | string | Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | false |
| expirationSeconds | integer | ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes. | false |
| path | string | Path is the path relative to the mount point of the file to project the token into. | true |

### PostgresCluster.spec.proxy.pgBouncer.customTLSSecret

A secret projection containing a certificate and key with which to encrypt connections to PgBouncer. The "tls.crt", "tls.key", and "ca.crt" paths must be PEM-encoded certificates and keys. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the Secret or its key must be defined | false |

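A sketch of a custom TLS secret reference; the Secret name is hypothetical, and the optional `items` mapping simply projects the documented keys to their expected paths.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec.proxy.pgBouncer).
customTLSSecret:
  name: pgbouncer-tls        # hypothetical Secret containing tls.crt, tls.key, and ca.crt
  items:                     # optional; defaults project every key under its own name
    - key: tls.crt
      path: tls.crt
    - key: tls.key
      path: tls.key
    - key: ca.crt
      path: ca.crt
```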
### PostgresCluster.spec.proxy.pgBouncer.customTLSSecret.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| mode | integer | Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| key | string | The key to project. | true |
| path | string | The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |

### PostgresCluster.spec.proxy.pgBouncer.metadata

Metadata contains metadata for PostgresCluster resources

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| annotations | map[string]string |  | false |
| labels | map[string]string |  | false |

### PostgresCluster.spec.proxy.pgBouncer.resources

Compute resources of a PgBouncer container. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ | false |

### PostgresCluster.spec.proxy.pgBouncer.service

Specification of the service that exposes PgBouncer.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| type | enum | More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types | true |

### PostgresCluster.spec.proxy.pgBouncer.tolerations[index]

The pod this Toleration is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| effect | string | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | false |
| key | string | Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. | false |
| operator | string | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. | false |
| tolerationSeconds | integer | TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. | false |
| value | string | Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. | false |

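As an illustration, two tolerations using the fields above; the taint keys and values are hypothetical.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec.proxy.pgBouncer).
tolerations:
  - key: dedicated            # hypothetical taint key
    operator: Equal
    value: pgbouncer
    effect: NoSchedule
  - key: maintenance          # hypothetical taint key
    operator: Exists
    effect: NoExecute
    tolerationSeconds: 300    # tolerate the taint for five minutes before eviction
```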
### PostgresCluster.spec.service

Specification of the service that exposes the PostgreSQL primary instance.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| type | enum | More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types | true |

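For example, exposing the primary through a LoadBalancer-type Service (any standard Kubernetes Service type, such as ClusterIP or NodePort, could be used instead):

```yaml
# Fragment of a PostgresCluster manifest (nested under spec).
service:
  type: LoadBalancer   # standard Kubernetes Service type for the primary
```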
### PostgresCluster.spec.standby

Run this cluster as a read-only copy of an existing cluster or archive.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| enabled | boolean | Whether or not the PostgreSQL cluster should be read-only. When this is true, WAL files are applied from the pgBackRest repository. | false |
| repoName | string | The name of the pgBackRest repository to follow for WAL files. | true |

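A minimal standby sketch; `repo1` stands in for whichever repository name is defined under the cluster's pgBackRest configuration.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec).
standby:
  enabled: true       # run read-only, replaying WAL from the repository below
  repoName: repo1     # example name of the pgBackRest repository to follow
```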
### PostgresCluster.spec.users[index]

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| databases | []string | Databases to which this user can connect and create objects. Removing a database from this list does NOT revoke access. This field is ignored for the "postgres" user. | false |
| options | string | ALTER ROLE options except for PASSWORD. This field is ignored for the "postgres" user. More info: https://www.postgresql.org/docs/current/role-attributes.html | false |
| name | string | The name of this PostgreSQL user. The value may contain only lowercase letters, numbers, and hyphens so that it fits into Kubernetes metadata. | true |

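For example, declaring an application user; the user name, database, and role options below are illustrative.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec).
users:
  - name: app                         # lowercase letters, numbers, and hyphens only
    databases:
      - appdb                         # hypothetical database name
    options: "NOSUPERUSER CREATEDB"   # ALTER ROLE options other than PASSWORD
```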
### PostgresCluster.spec.backups

PostgreSQL backup configuration

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| pgbackrest | object | pgBackRest archive configuration | true |

### PostgresCluster.spec.backups.pgbackrest

pgBackRest archive configuration

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| configuration | []object | Projected volumes containing custom pgBackRest configuration. These files are mounted under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the PostgreSQL Operator: https://pgbackrest.org/configuration.html | false |
| global | map[string]string | Global pgBackRest configuration settings. These settings are included in the "global" section of the pgBackRest configuration generated by the PostgreSQL Operator, and then mounted under "/etc/pgbackrest/conf.d": https://pgbackrest.org/configuration.html | false |
| image | string | The image name to use for pgBackRest containers. Utilized to run pgBackRest repository hosts and backups. The image may also be set using the RELATED_IMAGE_PGBACKREST environment variable | false |
| manual | object | Defines details for manual pgBackRest backup Jobs | false |
| metadata | object | Metadata contains metadata for PostgresCluster resources | false |
| repoHost | object | Defines configuration for a pgBackRest dedicated repository host. This section is only applicable if at least one "volume" (i.e. PVC-based) repository is defined in the "repos" section, therefore enabling a dedicated repository host Deployment. | false |
| repos | []object | Defines a pgBackRest repository | false |
| restore | object | Defines details for performing an in-place restore using pgBackRest | false |

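A hedged sketch combining a few of the fields above; the retention option is a standard pgBackRest setting used here only as an example, and the Secret name is hypothetical. Repository definitions under `repos` are documented in their own section.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec.backups).
pgbackrest:
  global:                                  # merged into the generated pgBackRest configuration
    repo1-retention-full: "14"             # example pgBackRest option; values are strings
  configuration:                           # projected under /etc/pgbackrest/conf.d
    - secret:
        name: pgbackrest-credentials       # hypothetical Secret with additional settings
```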
### PostgresCluster.spec.backups.pgbackrest.configuration[index]

Projection that may be projected along with other supported volume types

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| configMap | object | information about the configMap data to project | false |
| downwardAPI | object | information about the downwardAPI data to project | false |
| secret | object | information about the secret data to project | false |
| serviceAccountToken | object | information about the serviceAccountToken data to project | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].configMap

information about the configMap data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the ConfigMap or its keys must be defined | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].configMap.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| mode | integer | Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| key | string | The key to project. | true |
| path | string | The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].downwardAPI

information about the downwardAPI data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | Items is a list of DownwardAPIVolume file | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].downwardAPI.items[index]

DownwardAPIVolumeFile represents information to create the file containing the pod field

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| fieldRef | object | Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. | false |
| mode | integer | Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| resourceFieldRef | object | Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. | false |
| path | string | Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' | true |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].downwardAPI.items[index].fieldRef

Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| apiVersion | string | Version of the schema the FieldPath is written in terms of, defaults to "v1". | false |
| fieldPath | string | Path of the field to select in the specified API version. | true |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].downwardAPI.items[index].resourceFieldRef

Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| containerName | string | Container name: required for volumes, optional for env vars | false |
| divisor | int or string | Specifies the output format of the exposed resources, defaults to "1" | false |
| resource | string | Required: resource to select | true |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].secret

information about the secret data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the Secret or its key must be defined | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].secret.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| mode | integer | Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| key | string | The key to project. | true |
| path | string | The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].serviceAccountToken

information about the serviceAccountToken data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| audience | string | Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | false |
| expirationSeconds | integer | ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes. | false |
| path | string | Path is the path relative to the mount point of the file to project the token into. | true |

### PostgresCluster.spec.backups.pgbackrest.manual

Defines details for manual pgBackRest backup Jobs

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| options | []string | Command line options to include when running the pgBackRest backup command. https://pgbackrest.org/command.html#command-backup | false |
| repoName | string | The name of the pgBackRest repo to run the backup command against. | true |

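For instance, requesting a differential backup against one repository; the repository name is an example, and the option shown is a standard `pgbackrest backup` flag.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec.backups.pgbackrest).
manual:
  repoName: repo1      # repository the backup command runs against
  options:             # extra command-line options for pgbackrest backup
    - --type=diff
```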
### PostgresCluster.spec.backups.pgbackrest.metadata

Metadata contains metadata for PostgresCluster resources

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| annotations | map[string]string |  | false |
| labels | map[string]string |  | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost

Defines configuration for a pgBackRest dedicated repository host. This section is only applicable if at least one "volume" (i.e. PVC-based) repository is defined in the "repos" section, therefore enabling a dedicated repository host Deployment.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| affinity | object | Scheduling constraints of the dedicated repo host pod. Changing this value causes the repo host to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| resources | object | Resource requirements for a pgBackRest repository host | false |
| sshConfigMap | object | ConfigMap containing custom SSH configuration | false |
| sshSecret | object | Secret containing custom SSH keys | false |
| tolerations | []object | Tolerations of a pgBackRest repo host pod. Changing this value causes a restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |

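A brief sketch of a dedicated repository host definition; the resource figures and the toleration key are illustrative only.

```yaml
# Fragment of a PostgresCluster manifest (nested under spec.backups.pgbackrest).
repoHost:
  resources:
    requests:
      cpu: 100m
      memory: 256Mi
  tolerations:
    - key: backups          # hypothetical taint key
      operator: Exists
      effect: NoSchedule
```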
- PostgresCluster.spec.backups.pgbackrest.repoHost.affinity - ↩ Parent -

- - - -Scheduling constraints of the Dedicated repo host pod. Changing this value causes repo host to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
nodeAffinityobjectDescribes node affinity scheduling rules for the pod.false
podAffinityobjectDescribes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).false
podAntiAffinityobjectDescribes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).false
- - -

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity ↩ Parent

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference ↩ Parent

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution ↩ Parent

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] ↩ Parent

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity ↩ Parent

Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm ↩ Parent

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaces | []string | namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" | false |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `topologyKey` matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaces | []string | namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" | false |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity ↩ Parent

Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm ↩ Parent

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaces | []string | namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" | false |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `topologyKey` matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaces | []string | namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" | false |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.resources ↩ Parent

Resource requirements for a pgBackRest repository host.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ | false |
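For example, a minimal sketch of modest resource settings for the repo host container; the values below are placeholders to size for your own workload.

```yaml
spec:
  backups:
    pgbackrest:
      repoHost:
        resources:
          requests:
            cpu: 500m      # placeholder request
            memory: 512Mi
          limits:
            memory: 512Mi  # placeholder limit
```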

### PostgresCluster.spec.backups.pgbackrest.repoHost.sshConfigMap ↩ Parent

ConfigMap containing custom SSH configuration.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the ConfigMap or its keys must be defined | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.sshConfigMap.items[index] ↩ Parent

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| mode | integer | Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| key | string | The key to project. | true |
| path | string | The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.sshSecret ↩ Parent

Secret containing custom SSH keys.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the Secret or its key must be defined | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.sshSecret.items[index] ↩ Parent

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| mode | integer | Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| key | string | The key to project. | true |
| path | string | The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.tolerations[index] ↩ Parent

The pod this Toleration is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| effect | string | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | false |
| key | string | Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. | false |
| operator | string | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. | false |
| tolerationSeconds | integer | TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. | false |
| value | string | Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. | false |
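A short sketch of tolerating a hypothetical `backups=dedicated:NoSchedule` taint so the repo host can schedule onto tainted nodes; the taint key and value are examples only.

```yaml
spec:
  backups:
    pgbackrest:
      repoHost:
        tolerations:
        - key: backups        # hypothetical taint key
          operator: Equal
          value: dedicated
          effect: NoSchedule
```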

### PostgresCluster.spec.backups.pgbackrest.repos[index] ↩ Parent

PGBackRestRepo represents a pgBackRest repository. Only one of its members may be specified.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| azure | object | Represents a pgBackRest repository that is created using Azure storage | false |
| gcs | object | Represents a pgBackRest repository that is created using Google Cloud Storage | false |
| s3 | object | RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) storage | false |
| schedules | object | Defines the schedules for the pgBackRest backups. Full, Differential and Incremental backup types are supported: https://pgbackrest.org/user-guide.html#concept/backup | false |
| volume | object | Represents a pgBackRest repository that is created using a PersistentVolumeClaim | false |
| name | string | The name of the repository | true |
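Putting the required `name` together with exactly one storage type, here is a minimal sketch of a volume-backed repository; the storage request is a placeholder.

```yaml
spec:
  backups:
    pgbackrest:
      repos:
      - name: repo1
        volume:
          volumeClaimSpec:
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: 1Gi  # placeholder size
```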

### PostgresCluster.spec.backups.pgbackrest.repos[index].azure ↩ Parent

Represents a pgBackRest repository that is created using Azure storage.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| container | string | The Azure container utilized for the repository | true |

### PostgresCluster.spec.backups.pgbackrest.repos[index].gcs ↩ Parent

Represents a pgBackRest repository that is created using Google Cloud Storage.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| bucket | string | The GCS bucket utilized for the repository | true |

### PostgresCluster.spec.backups.pgbackrest.repos[index].s3 ↩ Parent

RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) storage.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| bucket | string | The S3 bucket utilized for the repository | true |
| endpoint | string | A valid endpoint corresponding to the specified region | true |
| region | string | The region corresponding to the S3 bucket | true |
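A sketch of an S3-backed repository using the three fields above; the bucket, endpoint, and region values are placeholders, and the S3 credentials themselves are supplied separately through pgBackRest configuration rather than these fields.

```yaml
spec:
  backups:
    pgbackrest:
      repos:
      - name: repo2
        s3:
          bucket: my-pgbackrest-bucket           # placeholder
          endpoint: s3.us-east-1.amazonaws.com   # placeholder
          region: us-east-1                      # placeholder
```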

### PostgresCluster.spec.backups.pgbackrest.repos[index].schedules ↩ Parent

Defines the schedules for the pgBackRest backups. Full, Differential and Incremental backup types are supported: https://pgbackrest.org/user-guide.html#concept/backup

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| differential | string | Defines the Cron schedule for a differential pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax | false |
| full | string | Defines the Cron schedule for a full pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax | false |
| incremental | string | Defines the Cron schedule for an incremental pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax | false |
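For instance, standard cron syntax can express a weekly full backup with daily differentials and periodic incrementals; adjust the expressions to your own retention needs.

```yaml
spec:
  backups:
    pgbackrest:
      repos:
      - name: repo1
        schedules:
          full: "0 1 * * 0"           # weekly, Sunday at 01:00
          differential: "0 1 * * 1-6" # daily except Sunday
          incremental: "0 */4 * * *"  # every four hours
```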

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume ↩ Parent

Represents a pgBackRest repository that is created using a PersistentVolumeClaim.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| volumeClaimSpec | object | Defines a PersistentVolumeClaim spec used to create and/or bind a volume | true |

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec ↩ Parent

Defines a PersistentVolumeClaim spec used to create and/or bind a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| accessModes | []string | AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | false |
| dataSource | object | This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. | false |
| resources | object | Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources | false |
| selector | object | A label query over volumes to consider for binding. | false |
| storageClassName | string | Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 | false |
| volumeMode | string | volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. | false |
| volumeName | string | VolumeName is the binding reference to the PersistentVolume backing this claim. | false |
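A minimal sketch of the claim spec portion, requesting ReadWriteOnce storage from a hypothetical storage class; the class name and size are placeholders.

```yaml
spec:
  backups:
    pgbackrest:
      repos:
      - name: repo1
        volume:
          volumeClaimSpec:
            accessModes: ["ReadWriteOnce"]
            storageClassName: standard  # hypothetical class name
            volumeMode: Filesystem
            resources:
              requests:
                storage: 1Gi            # placeholder size
```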

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.dataSource ↩ Parent

This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.resources ↩ Parent

Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ | false |

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.selector ↩ Parent

A label query over volumes to consider for binding.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.selector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

### PostgresCluster.spec.backups.pgbackrest.restore ↩ Parent

Defines details for performing an in-place restore using pgBackRest.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| affinity | object | Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| clusterName | string | The name of an existing PostgresCluster to use as the data source for the new PostgresCluster. Defaults to the name of the PostgresCluster being created if not provided. | false |
| clusterNamespace | string | The namespace of the cluster specified as the data source using the clusterName field. Defaults to the namespace of the PostgresCluster being created if not provided. | false |
| options | []string | Command line options to include when running the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore | false |
| resources | object | Resource requirements for the pgBackRest restore Job. | false |
| tolerations | []object | Tolerations of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |
| enabled | boolean | Whether or not in-place pgBackRest restores are enabled for this PostgresCluster. | true |
| repoName | string | The name of the pgBackRest repo within the source PostgresCluster that contains the backups that should be utilized to perform a pgBackRest restore when initializing the data source for the new PostgresCluster. | true |
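As a sketch, an in-place point-in-time restore might be requested as shown below; the repository name and target timestamp are placeholders, and the `options` entries are passed through to `pgbackrest restore`.

```yaml
spec:
  backups:
    pgbackrest:
      restore:
        enabled: true
        repoName: repo1                           # placeholder repo name
        options:
        - --type=time
        - --target="2024-01-01 12:00:00+00"       # placeholder recovery target
```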

### PostgresCluster.spec.backups.pgbackrest.restore.affinity ↩ Parent

Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| nodeAffinity | object | Describes node affinity scheduling rules for the pod. | false |
| podAffinity | object | Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). | false |
| podAntiAffinity | object | Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity ↩ Parent

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference ↩ Parent

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution ↩ Parent

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] ↩ Parent

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity ↩ Parent

Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm ↩ Parent

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaces | []string | namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" | false |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `topologyKey` matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaces | []string | namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" | false |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity ↩ Parent

Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm ↩ Parent

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaces | []string | namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" | false |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `topologyKey` matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaces | []string | namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" | false |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |

- PostgresCluster.spec.backups.pgbackrest.restore.resources - ↩ Parent -

- - - -Resource requirements for the pgBackRest restore Job. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/false
- - -

- PostgresCluster.spec.backups.pgbackrest.restore.tolerations[index] - ↩ Parent -

The pod this Toleration is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.
NameTypeDescriptionRequired
effectstringEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.false
keystringKey is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.false
operatorstringOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.false
tolerationSecondsintegerTolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.false
valuestringValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.false
- - -

- PostgresCluster.spec.instances[index] - ↩ Parent -

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| affinity | object | Scheduling constraints of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| metadata | object | Metadata contains metadata for PostgresCluster resources | false |
| name | string |  | false |
| replicas | integer |  | false |
| resources | object | Compute resources of a PostgreSQL container. | false |
| tolerations | []object | Tolerations of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |
| walVolumeClaimSpec | object | Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. More info: https://www.postgresql.org/docs/current/wal.html | false |
| dataVolumeClaimSpec | object | Defines a PersistentVolumeClaim for PostgreSQL data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes | true |

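As a rough illustration of the fields above, a minimal instance set might look like the following sketch; the instance name, replica count, storage size, and resource values are placeholders, not defaults:

```yaml
spec:
  instances:
    - name: instance1
      replicas: 2
      dataVolumeClaimSpec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: 1Gi
      resources:
        requests:
          cpu: 500m
          memory: 1Gi
```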
- PostgresCluster.spec.instances[index].affinity - ↩ Parent -

- - - -Scheduling constraints of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
nodeAffinityobjectDescribes node affinity scheduling rules for the pod.false
podAffinityobjectDescribes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).false
podAntiAffinityobjectDescribes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).false
- - -

- PostgresCluster.spec.instances[index].affinity.nodeAffinity - ↩ Parent -

- - - -Describes node affinity scheduling rules for the pod. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecutionobjectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.false
- - -

- PostgresCluster.spec.instances[index].affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
preferenceobjectA node selector term, associated with the corresponding weight.true
weightintegerWeight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.true
- - -

- PostgresCluster.spec.instances[index].affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference - ↩ Parent -

- - - -A node selector term, associated with the corresponding weight. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
- - -

- PostgresCluster.spec.instances[index].affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] - ↩ Parent -

- - - -A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
- - -

- PostgresCluster.spec.instances[index].affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] - ↩ Parent -

- - - -A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
- - -

- PostgresCluster.spec.instances[index].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution - ↩ Parent -

- - - -If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
nodeSelectorTerms[]objectRequired. A list of node selector terms. The terms are ORed.true
- - -

- PostgresCluster.spec.instances[index].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] - ↩ Parent -

- - - -A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectA list of node selector requirements by node's labels.false
matchFields[]objectA list of node selector requirements by node's fields.false
- - -

- PostgresCluster.spec.instances[index].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] - ↩ Parent -

- - - -A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
- - -

- PostgresCluster.spec.instances[index].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] - ↩ Parent -

- - - -A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringAn array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.false
keystringThe label key that the selector applies to.true
operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.true
- - -

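For illustration only, a required node affinity rule built from the `nodeSelectorTerms` and `matchExpressions` fields above might pin instances to nodes carrying a particular label; the label key and value here are placeholders for whatever labels your nodes actually carry:

```yaml
spec:
  instances:
    - name: instance1
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: disktype
                    operator: In
                    values:
                      - ssd
```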
- PostgresCluster.spec.instances[index].affinity.podAffinity - ↩ Parent -

- - - -Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
- - -

- PostgresCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
- - -

- PostgresCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm - ↩ Parent -

- - - -Required. A pod affinity term, associated with the corresponding weight. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaces[]stringnamespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"false
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
- - -

- PostgresCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector - ↩ Parent -

- - - -A label query over a set of resources, in this case pods. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
- - -

- PostgresCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaces[]stringnamespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"false
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
- - -

- PostgresCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector - ↩ Parent -

- - - -A label query over a set of resources, in this case pods. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
- - -

- PostgresCluster.spec.instances[index].affinity.podAntiAffinity - ↩ Parent -

- - - -Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
- - -

- PostgresCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
- - -

- PostgresCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm - ↩ Parent -

- - - -Required. A pod affinity term, associated with the corresponding weight. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaces[]stringnamespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"false
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
- - -

- PostgresCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector - ↩ Parent -

- - - -A label query over a set of resources, in this case pods. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
- - -

- PostgresCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaces[]stringnamespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"false
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
- - -

- PostgresCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector - ↩ Parent -

- - - -A label query over a set of resources, in this case pods. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
- - -

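A sketch of a preferred pod anti-affinity rule using the fields above. The weight and `topologyKey` are example values, and the label selector assumes PGO's `postgres-operator.crunchydata.com/cluster` Pod label; adjust it to whatever labels are actually present on your Pods:

```yaml
spec:
  instances:
    - name: instance1
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              podAffinityTerm:
                topologyKey: kubernetes.io/hostname
                labelSelector:
                  matchLabels:
                    postgres-operator.crunchydata.com/cluster: hippo
```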
- PostgresCluster.spec.instances[index].metadata - ↩ Parent -

- - - -Metadata contains metadata for PostgresCluster resources - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
annotationsmap[string]stringfalse
labelsmap[string]stringfalse
- - -

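For example, custom annotations and labels could be attached to an instance set like so; the keys and values are arbitrary examples, and how far they propagate to the objects PGO creates depends on the operator's behavior rather than anything shown here:

```yaml
spec:
  instances:
    - name: instance1
      metadata:
        annotations:
          example.com/owner: database-team
        labels:
          example.com/tier: production
```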
- PostgresCluster.spec.instances[index].resources - ↩ Parent -

- - - -Compute resources of a PostgreSQL container. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/false
- - -

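The `limits` and `requests` maps take the usual Kubernetes resource quantities; for instance (values are placeholders, not recommendations):

```yaml
spec:
  instances:
    - name: instance1
      resources:
        requests:
          cpu: "1"
          memory: 2Gi
        limits:
          cpu: "2"
          memory: 4Gi
```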
- PostgresCluster.spec.instances[index].tolerations[index] - ↩ Parent -

The pod this Toleration is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.
NameTypeDescriptionRequired
effectstringEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.false
keystringKey is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.false
operatorstringOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.false
tolerationSecondsintegerTolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.false
valuestringValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.false
- - -

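A toleration combining these fields might look like the following; the taint key and value are placeholders for whatever taints your nodes actually carry:

```yaml
spec:
  instances:
    - name: instance1
      tolerations:
        - key: dedicated
          operator: Equal
          value: postgres
          effect: NoSchedule
```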
- PostgresCluster.spec.instances[index].walVolumeClaimSpec - ↩ Parent -

- - - -Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. More info: https://www.postgresql.org/docs/current/wal.html - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
accessModes[]stringAccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1false
dataSourceobjectThis field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.false
resourcesobjectResources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resourcesfalse
selectorobjectA label query over volumes to consider for binding.false
storageClassNamestringName of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1false
volumeModestringvolumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.false
volumeNamestringVolumeName is the binding reference to the PersistentVolume backing this claim.false
- - -

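As a sketch, placing the write-ahead log on its own volume could look like this; the storage class name and size are placeholders:

```yaml
spec:
  instances:
    - name: instance1
      walVolumeClaimSpec:
        accessModes:
          - "ReadWriteOnce"
        storageClassName: fast-ssd
        resources:
          requests:
            storage: 1Gi
```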
- PostgresCluster.spec.instances[index].walVolumeClaimSpec.dataSource - ↩ Parent -

- - - -This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
apiGroupstringAPIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.false
kindstringKind is the type of resource being referencedtrue
namestringName is the name of resource being referencedtrue
- - -

- PostgresCluster.spec.instances[index].walVolumeClaimSpec.resources - ↩ Parent -

- - - -Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/false
- - -

- PostgresCluster.spec.instances[index].walVolumeClaimSpec.selector - ↩ Parent -

- - - -A label query over volumes to consider for binding. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.instances[index].walVolumeClaimSpec.selector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
- - -

- PostgresCluster.spec.instances[index].dataVolumeClaimSpec - ↩ Parent -

- - - -Defines a PersistentVolumeClaim for PostgreSQL data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
accessModes[]stringAccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1false
dataSourceobjectThis field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.false
resourcesobjectResources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resourcesfalse
selectorobjectA label query over volumes to consider for binding.false
storageClassNamestringName of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1false
volumeModestringvolumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.false
volumeNamestringVolumeName is the binding reference to the PersistentVolume backing this claim.false
- - -

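A data volume claim exercising a few more of the fields above; the access mode, storage class, and size shown are examples rather than defaults:

```yaml
spec:
  instances:
    - name: instance1
      dataVolumeClaimSpec:
        accessModes:
          - "ReadWriteOnce"
        storageClassName: standard
        volumeMode: Filesystem
        resources:
          requests:
            storage: 1Gi
```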
- PostgresCluster.spec.instances[index].dataVolumeClaimSpec.dataSource - ↩ Parent -

- - - -This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
apiGroupstringAPIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.false
kindstringKind is the type of resource being referencedtrue
namestringName is the name of resource being referencedtrue
- - -

- PostgresCluster.spec.instances[index].dataVolumeClaimSpec.resources - ↩ Parent -

- - - -Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/false
- - -

- PostgresCluster.spec.instances[index].dataVolumeClaimSpec.selector - ↩ Parent -

- - - -A label query over volumes to consider for binding. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.instances[index].dataVolumeClaimSpec.selector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
- - -

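The selector fields above follow the standard Kubernetes label selector shape; a hypothetical selector restricting binding to pre-provisioned volumes might read (all label keys and values are invented for illustration):

```yaml
spec:
  instances:
    - name: instance1
      dataVolumeClaimSpec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: 1Gi
        selector:
          matchLabels:
            example.com/disk: local-ssd
          matchExpressions:
            - key: example.com/zone
              operator: In
              values:
                - us-east-1a
```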
- PostgresCluster.status - ↩ Parent -

PostgresClusterStatus defines the observed state of PostgresCluster

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| conditions | []object | conditions represent the observations of postgrescluster's current state. Known .status.conditions.type are: "PersistentVolumeResizing", "ProxyAvailable" | false |
| databaseRevision | string | Identifies the databases that have been installed into PostgreSQL. | false |
| instances | []object | Current state of PostgreSQL instances. | false |
| monitoring | object | Current state of PostgreSQL cluster monitoring tool configuration | false |
| observedGeneration | integer | observedGeneration represents the .metadata.generation on which the status was based. | false |
| patroni | object |  | false |
| pgbackrest | object | Status information for pgBackRest | false |
| proxy | object | Current state of the PostgreSQL proxy. | false |
| startupInstance | string | The instance that should be started first when bootstrapping and/or starting a PostgresCluster. | false |
| startupInstanceSet | string | The instance set associated with the startupInstance | false |
| usersRevision | string | Identifies the users that have been installed into PostgreSQL. | false |

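The status subresource can be inspected directly with `kubectl`; for example, assuming a cluster named `hippo` in the `postgres-operator` namespace:

```shell
kubectl -n postgres-operator describe postgrescluster hippo
kubectl -n postgres-operator get postgrescluster hippo -o jsonpath='{.status.instances}'
```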
- PostgresCluster.status.conditions[index] - ↩ Parent -

Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // other fields }

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| observedGeneration | integer | observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. | false |
| lastTransitionTime | string | lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. | true |
| message | string | message is a human readable message indicating details about the transition. This may be an empty string. | true |
| reason | string | reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. | true |
| status | enum | status of the condition, one of True, False, Unknown. | true |
| type | string | type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) | true |

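Individual conditions can be read or waited on with standard tooling; for example, assuming the `hippo` cluster in the `postgres-operator` namespace and a `kubectl` version whose `wait` command supports custom resource conditions:

```shell
kubectl -n postgres-operator get postgrescluster hippo \
  -o jsonpath='{.status.conditions[?(@.type=="ProxyAvailable")].status}'

kubectl -n postgres-operator wait postgrescluster/hippo \
  --for=condition=ProxyAvailable --timeout=120s
```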
- PostgresCluster.status.instances[index] - ↩ Parent -

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| readyReplicas | integer | Total number of ready pods. | false |
| replicas | integer | Total number of non-terminated pods. | false |
| updatedReplicas | integer | Total number of non-terminated pods that have the desired specification. | false |
| name | string |  | true |

- PostgresCluster.status.monitoring - ↩ Parent -

Current state of PostgreSQL cluster monitoring tool configuration

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| exporterConfiguration | string |  | false |

- PostgresCluster.status.patroni - ↩ Parent -

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| systemIdentifier | string | The PostgreSQL system identifier reported by Patroni. | false |

- PostgresCluster.status.pgbackrest - ↩ Parent -

Status information for pgBackRest

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| manualBackup | object | Status information for manual backups | false |
| repoHost | object | Status information for the pgBackRest dedicated repository host | false |
| repos | []object | Status information for pgBackRest repositories | false |
| restore | object | Status information for in-place restores | false |
| scheduledBackups | []object | Status information for scheduled backups | false |

- PostgresCluster.status.pgbackrest.manualBackup - ↩ Parent -

Status information for manual backups

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| active | integer | The number of actively running manual backup Pods. | false |
| completionTime | string | Represents the time the manual backup Job was determined by the Job controller to be completed. This field is only set if the backup completed successfully. Additionally, it is represented in RFC3339 form and is in UTC. | false |
| failed | integer | The number of Pods for the manual backup Job that reached the "Failed" phase. | false |
| startTime | string | Represents the time the manual backup Job was acknowledged by the Job controller. It is represented in RFC3339 form and is in UTC. | false |
| succeeded | integer | The number of Pods for the manual backup Job that reached the "Succeeded" phase. | false |
| finished | boolean | Specifies whether or not the Job is finished executing (does not indicate success or failure). | true |
| id | string | A unique identifier for the manual backup as provided using the "pgbackrest-backup" annotation when initiating a backup. | true |

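The `id` field corresponds to the value supplied via the pgBackRest backup annotation when a manual backup is requested. Assuming the annotation key is `postgres-operator.crunchydata.com/pgbackrest-backup`, a backup could be triggered for the `hippo` cluster with something like:

```shell
kubectl -n postgres-operator annotate postgrescluster hippo \
  postgres-operator.crunchydata.com/pgbackrest-backup="$(date)" --overwrite
```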
- PostgresCluster.status.pgbackrest.repoHost - ↩ Parent -

Status information for the pgBackRest dedicated repository host

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| apiVersion | string | APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | false |
| kind | string | Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | false |
| ready | boolean | Whether or not the pgBackRest repository host is ready for use | false |

- PostgresCluster.status.pgbackrest.repos[index] - ↩ Parent -

RepoStatus is the status of a pgBackRest repository.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| bound | boolean | Whether or not the pgBackRest repository PersistentVolumeClaim is bound to a volume | false |
| replicaCreateBackupComplete | boolean | ReplicaCreateBackupReady indicates whether a backup exists in the repository as needed to bootstrap replicas. | false |
| repoOptionsHash | string | A hash of the required fields in the spec for defining an Azure, GCS or S3 repository, utilized to detect changes to these fields and then execute pgBackRest stanza-create commands accordingly. | false |
| stanzaCreated | boolean | Specifies whether or not a stanza has been successfully created for the repository | false |
| volume | string | The name of the volume containing the pgBackRest repository | false |
| name | string | The name of the pgBackRest repository | true |

- PostgresCluster.status.pgbackrest.restore - ↩ Parent -

Status information for in-place restores

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| active | integer | The number of actively running manual backup Pods. | false |
| completionTime | string | Represents the time the manual backup Job was determined by the Job controller to be completed. This field is only set if the backup completed successfully. Additionally, it is represented in RFC3339 form and is in UTC. | false |
| failed | integer | The number of Pods for the manual backup Job that reached the "Failed" phase. | false |
| startTime | string | Represents the time the manual backup Job was acknowledged by the Job controller. It is represented in RFC3339 form and is in UTC. | false |
| succeeded | integer | The number of Pods for the manual backup Job that reached the "Succeeded" phase. | false |
| finished | boolean | Specifies whether or not the Job is finished executing (does not indicate success or failure). | true |
| id | string | A unique identifier for the manual backup as provided using the "pgbackrest-backup" annotation when initiating a backup. | true |

- PostgresCluster.status.pgbackrest.scheduledBackups[index] - ↩ Parent -

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| active | integer | The number of actively running manual backup Pods. | false |
| completionTime | string | Represents the time the manual backup Job was determined by the Job controller to be completed. This field is only set if the backup completed successfully. Additionally, it is represented in RFC3339 form and is in UTC. | false |
| cronJobName | string | The name of the associated pgBackRest scheduled backup CronJob | false |
| failed | integer | The number of Pods for the manual backup Job that reached the "Failed" phase. | false |
| repo | string | The name of the associated pgBackRest repository | false |
| startTime | string | Represents the time the manual backup Job was acknowledged by the Job controller. It is represented in RFC3339 form and is in UTC. | false |
| succeeded | integer | The number of Pods for the manual backup Job that reached the "Succeeded" phase. | false |
| type | string | The pgBackRest backup type for this Job | false |

- PostgresCluster.status.proxy - ↩ Parent -

Current state of the PostgreSQL proxy.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| pgBouncer | object |  | false |

- PostgresCluster.status.proxy.pgBouncer - ↩ Parent -

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| postgresRevision | string | Identifies the revision of PgBouncer assets that have been installed into PostgreSQL. | false |
| readyReplicas | integer | Total number of ready pods. | false |
| replicas | integer | Total number of non-terminated pods. | false |
diff --git a/docs/content/releases/5.0.0.md b/docs/content/releases/5.0.0.md deleted file mode 100644 index 19955c73fe..0000000000 --- a/docs/content/releases/5.0.0.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: "5.0.0" -date: -draft: false -weight: 900 ---- - -Crunchy Data announces the release of the PGO, the open source Postgres Operator, 5.0.0 on June 30, 2021. - -To get started with PGO 5.0.0, we invite you to read through the [quickstart]({{< relref "quickstart/_index.md" >}}). We also encourage you to work through the [PGO tutorial]({{< relref "tutorial/_index.md" >}}). - -PGO 5.0.0 is a major release of the Postgres Operator. The focus of this release was to take the features from the previous versions of PGO, add in some new features, and allow you to deploy Kubernetes native Postgres through a fully declarative, GitOps style workflow. As with previous versions, PGO 5.0 makes it easy to deploy production ready, cloud native Postgres. - -Postgres clusters are now fully managed through a custom resource called [`postgrescluster.postgres-operator.crunchydata.com`]({{< relref "references/crd.md" >}}). You can also view the various attributes of the custom resource using `kubectl explain postgrescluster.postgres-operator.crunchydata.com` or `kubectl explain postgrescluster`. The custom resource can be edited at any time, and all of the changes are rolled out in a minimally disruptive way. - -There are [a set of examples](https://github.com/CrunchyData/postgres-operator-examples/fork) for how to use Kustomize and Helm with PGO 5.0. This example set will grow and we encourage you to contribute to it. - -PGO 5.0 continues to support the Postgres architecture that was built up in previous releases. This means that Postgres clusters are deployed without a single-point-of-failure and can continue operating even if PGO is unavailable. PGO 5.0 includes support for Postgres high availability, backup management, disaster recovery, monitoring, full customizability, database cloning, connection pooling, security, running with locked down container settings, and more. - -PGO 5.0 also continuously monitors your environment to ensure all of the components you want deployed are available. For example, if PGO detects that your connection pooler is missing, it will recreate it as you specified in the custom resource. PGO 5.0 can watch for Postgres clusters in all Kubernetes namespaces or be isolated to individual namespaces. - -As PGO 5.0 is a major release, it is not backwards compatible with PGO 4.x. However, you can run PGO 4.x and PGO 5.0 in the same Kubernetes cluster, which allows you to migrate Postgres clusters from 4.x to 5.0. - -## Changes - -Beyond being fully declarative, PGO 5.0 has some notable changes that you should be aware of. These include: - -- The minimum Kubernetes version is now 1.18. The minimum OpenShift version is 4.5. This release drops support for OpenShift 3.11. - - We recommend running the latest bug fix releases of Kubernetes. -- The removal of the `pgo` client. This may be reintroduced in a later release, but all actions on a Postgres cluster can be accomplished using `kubectl`, `oc`, or your preferred Kubernetes management tool (e.g. ArgoCD). -- A fully defined `status` subresource is now available within the `postgrescluster` custom resource that provides direct insight into the current status of a PostgreSQL cluster. -- Native Kubernetes eventing is now utilized to generate and record events related to the creation and management of PostgreSQL clusters. 
-- Postgres instances now use Kubernetes Statefulsets. -- Scheduled backups now use Kubernetes CronJobs. -- Connections to Postgres require TLS. You can bring your own TLS infrastructure, otherwise PGO provides it for you. -- Custom configurations for all components can be set directly on the `postgrescluster` custom resource. - -## Features - -In addition to supporting the PGO 4.x feature set, the PGO 5.0.0 adds the following new features: - -- Postgres minor version (bug fix) updates can be applied without having to update PGO. You only need to update the `image` attribute in the custom resource. -- Adds support for Azure Blob Storage for storing backups. This is in addition to using Kubernetes storage, Amazon S3 (or S3-equivalents like MinIO), and Google Cloud Storage (GCS). -- Allows for backups to be stored in up to four different locations simultaneously. -- Backup locations can be changed during the lifetime of a Postgres cluster, e.g. moving from "posix" to "s3". diff --git a/docs/content/releases/5.0.1.md b/docs/content/releases/5.0.1.md deleted file mode 100644 index 670d696e7e..0000000000 --- a/docs/content/releases/5.0.1.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "5.0.1" -date: -draft: false -weight: 899 ---- - -Crunchy Data announces the release of [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) 5.0.1. - -Crunchy Postgres for Kubernetes is powered by [PGO](https://github.com/CrunchyData/postgres-operator), the open source [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com). [PGO](https://github.com/CrunchyData/postgres-operator) is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/). - -Crunchy Postgres for Kubernetes 5.0.1 includes the following software versions upgrades: - -- [Patroni](https://patroni.readthedocs.io/) is now at 2.1.0. -- PL/Tcl is now included in the PostGIS (`crunchy-postgres-gis-ha`) container. - -Read more about how you can [get started]({{< relref "quickstart/_index.md" >}}) with Crunchy Postgres for Kubernetes. We recommend [forking the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo. - -## Features - -- Custom affinity rules and tolerations can now be added to pgBackRest restore Jobs. -- OLM bundles can now be generated for PGO 5. - -## Changes - -- The `replicas` value for an instance set must now be greater than `0`, and at least one instance set must now be defined for a `PostgresCluster`. This is to prevent the cluster from being scaled down to `0` instances, since doing so results in the inability to scale the cluster back up. -- Refreshed the PostgresCluster CRD documentation using the latest version of `crdoc` (`v0.3.0`). -- The PGO test suite now includes a test to validate image pull secrets. -- Related Image functionality has been implemented for the OLM installer as required to support offline deployments. -- The name of the PGO Deployment and ServiceAccount has been changed to `pgo` for all installers, allowing both PGO v4.x and PGO v5.x to be run in the same namespace. If you are using Kustomize to install PGO and are upgrading from PGO 5.0.0, please see the [Upgrade Guide]({{< relref "../installation/upgrade.md" >}}) for addtional steps that must be completed as a result of this change in order to ensure a successful upgrade. 
-- PGO now automatically detects whether or not it is running in an OpenShift environment. -- Postgres users and databases can be specified in `PostgresCluster.spec.users`. The credentials stored in the `{cluster}-pguser` Secret are still valid, but they are no longer reconciled. References to that Secret should be replaced with `{cluster}-pguser-{cluster}`. Once all references are updated, the old `{cluster}-pguser` Secret can be deleted. -- The built-in `postgres` superuser can now be managed the same way as other users. Specifying it in `PostgresCluster.spec.users` will give it a password, allowing it to connect over the network. -- PostgreSQL data and pgBackRest repo volumes are now reconciled using labels. - -## Fixes - -- It is now possible to customize `shared_preload_libraries` when monitoring is enabled. -- Fixed a typo in the description of the `openshift` field in the PostgresCluster CRD. -- When a new cluster is created using an existing PostgresCluster as its dataSource, the original primary for that cluster will now properly initialize as a replica following a switchover. This is fixed with the upgrade to Patroni 2.1.0). -- A consistent `startupInstance` name is now set in the PostgresCluster status when bootstrapping a new cluster using an existing PostgresCluster as its data source. -- It is now possible to properly customize the `pg_hba.conf` configuration file. diff --git a/docs/content/releases/5.0.2.md b/docs/content/releases/5.0.2.md deleted file mode 100644 index 372c1c60fb..0000000000 --- a/docs/content/releases/5.0.2.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: "5.0.2" -date: -draft: false -weight: 898 ---- - -Crunchy Data announces the release of [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) 5.0.2. - -Crunchy Postgres for Kubernetes is powered by [PGO](https://github.com/CrunchyData/postgres-operator), the open source [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com). [PGO](https://github.com/CrunchyData/postgres-operator) is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/). - -Crunchy Postgres for Kubernetes 5.0.2 includes the following software versions upgrades: - -- [PostgreSQL](https://www.postgresql.org) is updated to 13.4, 12.8, 11.13, and 10.18. -- PL/Tcl is now included in the PostGIS (`crunchy-postgres-gis-ha`) container. -- The [TimescaleDB](https://github.com/timescale/timescaledb) extension is now at version 2.4.0. -- The [set_user](https://github.com/pgaudit/set_user) extension is now at version 2.0.1. - -Read more about how you can [get started]({{< relref "quickstart/_index.md" >}}) with Crunchy Postgres for Kubernetes. We recommend [forking the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo. 
diff --git a/docs/content/releases/_index.md b/docs/content/releases/_index.md deleted file mode 100644 index 7ea3840539..0000000000 --- a/docs/content/releases/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "Release Notes" -date: -draft: false -weight: 103 ---- diff --git a/docs/content/support/_index.md b/docs/content/support/_index.md deleted file mode 100644 index 0a2ca60346..0000000000 --- a/docs/content/support/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "Support" -date: -draft: false -weight: 110 ---- - -There are a few options available for community support of the [PGO: the Postgres Operator](https://github.com/CrunchyData/postgres-operator): - -- **If you believe you have found a bug** or have a detailed feature request: please open [an issue on GitHub](https://github.com/CrunchyData/postgres-operator/issues/new/choose). The Postgres Operator community and the Crunchy Data team behind the PGO is generally active in responding to issues. -- **For general questions or community support**: please join the [PostgreSQL Operator community mailing list](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join) at [https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join), - -In all cases, please be sure to provide as many details as possible in regards to your issue, including: - -- Your Platform (e.g. Kubernetes vX.YY.Z) -- Operator Version (e.g. {{< param centosBase >}}-{{< param operatorVersion >}}) -- A detailed description of the issue, as well as steps you took that lead up to the issue -- Any relevant logs -- Any additional information you can provide that you may find helpful - -For production and commercial support of the PostgreSQL Operator, please -[contact Crunchy Data](https://www.crunchydata.com/contact/) at [info@crunchydata.com](mailto:info@crunchydata.com) for information regarding an [Enterprise Support Subscription](https://www.crunchydata.com/about/value-of-subscription/). diff --git a/docs/content/tutorial/_index.md b/docs/content/tutorial/_index.md deleted file mode 100644 index db7477da91..0000000000 --- a/docs/content/tutorial/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "Tutorial" -date: -draft: false -weight: 20 ---- - -Ready to get started with [PGO](https://github.com/CrunchyData/postgres-operator), the [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com)? Us too! - -This tutorial covers several concepts around day-to-day life managing a Postgres cluster with PGO. While going through and looking at various "HOWTOs" with PGO, we will also cover concepts and features that will help you have a successful cloud native Postgres journey! - -In this tutorial, you will learn: - -- How to create a Postgres cluster -- How to connect to a Postgres cluster -- How to scale and create a high availability (HA) Postgres cluster -- How to resize your cluster -- How to set up proper disaster recovery and manage backups and restores -- How to apply software updates to Postgres and other components -- How to set up connection pooling -- How to delete your cluster - -and more. 
- -You will also see: - -- How PGO helps your Postgres cluster achieve high availability -- How PGO can heal your Postgres cluster and ensure all objects are present and available -- How PGO sets up disaster recovery -- How to manage working with PGO in a single namespace or in a cluster-wide installation of PGO. - -[Let's get started]({{< relref "./getting-started.md" >}})! diff --git a/docs/content/tutorial/administrative-tasks.md b/docs/content/tutorial/administrative-tasks.md deleted file mode 100644 index 7ce14444c6..0000000000 --- a/docs/content/tutorial/administrative-tasks.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "Administrative Tasks" -date: -draft: false -weight: 105 ---- - -## Manually Restarting PostgreSQL - -There are times when you might need to manually restart PostgreSQL. This can be done by adding or updating a custom annotation to the cluster's `spec.metadata.annotations` section. PGO will notice the change and perform a [rolling restart]({{< relref "/architecture/high-availability.md" >}}#rolling-update). - -For example, if you have a cluster named `hippo` in the namespace `postgres-operator`, all you need to do is patch the hippo postgrescluster with the following: - -```shell -kubectl patch postgrescluster/hippo -n postgres-operator --type merge \ - --patch '{"spec":{"metadata":{"annotations":{"restarted":"'"$(date)"'"}}}}' -``` - -Watch your hippo cluster: you will see the rolling update has been triggered and the restart has begun. - - -## Rotating TLS Certificates - -Credentials should be invalidated and replaced (rotated) as often as possible -to minimize the risk of their misuse. Unlike passwords, every TLS certificate -has an expiration, so replacing them is inevitable. When you use your own TLS -certificates with PGO, you are responsible for replacing them appropriately. -Here's how. - - -PostgreSQL needs to be restarted after its server certificates change. -There are a few ways to do it: - -1. Store the new certificates in a new Secret. Edit the PostgresCluster object - to refer to the new Secret, and PGO will perform a - [rolling restart]({{< relref "/architecture/high-availability.md" >}}#rolling-update). - ```yaml - spec: - customTLSSecret: - name: hippo.new.tls - ``` - - _or_ - -2. Replace the old certificates in the current Secret. PGO doesn't notice when - the contents of your Secret change, so you need to - [trigger a rolling restart]({{< relref "/tutorial/administrative-tasks.md" >}}#manually-restarting-postgresql). - -{{% notice info %}} -When changing the PostgreSQL certificate authority, make sure to update -[`customReplicationTLSSecret`]({{< relref "/tutorial/customize-cluster.md" >}}#customize-tls) as well. -{{% /notice %}} - -PgBouncer needs to be restarted after its certificates change. -There are a few ways to do it: - -1. Store the new certificates in a new Secret. Edit the PostgresCluster object - to refer to the new Secret, and PGO will perform a rolling restart of PgBouncer. - ```yaml - spec: - proxy: - pgBouncer: - customTLSSecret: - name: hippo.pgbouncer.new.tls - ``` - - _or_ - -2. Replace the old certificates in the current Secret. PGO doesn't notice when - the contents of your Secret change, so you need to trigger a rolling restart - of PgBouncer. Edit the PostgresCluster object to add a unique annotation. - The name and value are up to you, so long as the value differs from the - previous value. 
- ```yaml - spec: - proxy: - pgBouncer: - metadata: - annotations: - restarted: Q1-certs - ``` - - This `kubectl patch` command uses your local date and time: - - ```shell - kubectl patch postgrescluster/hippo --type merge \ - --patch '{"spec":{"proxy":{"pgBouncer":{"metadata":{"annotations":{"restarted":"'"$(date)"'"}}}}}}' - ``` - - -## Next Steps - -We've covered a lot in terms of building, maintaining, scaling, customizing, restarting, and expanding our Postgres cluster. However, there may come a time where we need to [delete our Postgres cluster]({{< relref "delete-cluster.md" >}}). How do we do that? - diff --git a/docs/content/tutorial/backup-management.md b/docs/content/tutorial/backup-management.md deleted file mode 100644 index 145ceb699b..0000000000 --- a/docs/content/tutorial/backup-management.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: "Backup Management" -date: -draft: false -weight: 82 ---- - -In the [previous section]({{< relref "./backups.md" >}}), we looked at a brief overview of the full disaster recovery feature set that PGO provides and explored how to [configure backups for our Postgres cluster]({{< relref "./backups.md" >}}). - -Now that we have backups set up, lets look at some of the various backup management tasks we can perform. These include: - -- Setting up scheduled backups -- Setting backup retention policies -- Taking one-off / ad hoc backups - -## Managing Scheduled Backups - -PGO sets up your Postgres clusters so that they are continuously archiving: your data is constantly being stored in your backup repository. Effectively, this is a backup! - -However, in a [disaster recovery]({{< relref "./disaster-recovery.md" >}}) scenario, you likely want to get your Postgres cluster back up and running as quickly as possible (e.g. a short "[recovery time objective (RTO)](https://en.wikipedia.org/wiki/Disaster_recovery#Recovery_Time_Objective)"). What helps accomplish this is to take periodic backups. This makes it faster to restore! - -[pgBackRest](https://pgbackrest.org/), the backup management tool used by PGO, provides different backup types to help both from a space management and RTO optimization perspective. These backup types include: - -- **full** (`full`): A backup of your entire Postgres cluster. This is the largest of all of the backup types. -- **differential** (`diff`): A backup of all of the data since the last `full` backup. -- **incremental** (`incr`): A backup of all of the data since the last `full`, `diff`, or `incr` backup. - -Selecting the appropriate backup strategy for your Postgres cluster is outside the scope of this tutorial, but let's look at how we can set up scheduled backups. - -Backup schedules are stored in the `spec.backups.pgbackrest.repos.schedules` section. Each value in this section accepts a [cron-formatted](https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax) string that dictates the backup schedule. The available keys are `full`, `differential`, and `incremental` for full, differential, and incremental backups respectively. - -Let's say that our backup policy is to take a full backup once a day at 1am and take incremental backups every four hours. 
We would want to add configuration to our spec that looks similar to: - -``` -spec: - backups: - pgbackrest: - repos: - - name: repo1 - schedules: - full: "0 1 * * *" - incremental: "0 */4 * * *" -``` - -To manage scheduled backups, PGO will create several Kubernetes [CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) objects that will perform backups on the specified schedule. The backups will use the [configuration that you specified]({{< relref "./backups.md" >}}). - -Ensuring you take regularly scheduled backups is important to maintaining Postgres cluster health. However, you don't need to keep all of your backups: this could cause you to run out of space! As such, it's also important to set a backup retention policy. - -## Managing Backup Retention - -PGO lets you set backup retention on full and differential backups. When a backup expires, either through your retention policy or through manual expiration, pgBackRest will clean up any backup associated with it. For example, if you have a full backup with four incremental backups associated with it, when the full backup expires, all of its incremental backups also expire. - -There are two different types of backup retention you can set: - -- `count`: This is based on the number of backups you want to keep. This is the default. -- `time`: This is based on the total number of days you would like to keep a backup. - -Let's look at an example where we keep full backups for 14 days. The most convenient way to do this is through the `spec.backups.pgbackrest.global` section, e.g.: - -``` -spec: - backups: - pgbackrest: - global: - repo1-retention-full: "14" - repo1-retention-full-type: time -``` - -For a full list of available configuration options, please visit the [pgBackRest configuration](https://pgbackrest.org/configuration.html) guide. - -## Taking a One-Off Backup - -There are times when you may want to take a one-off backup, such as before major application changes or updates. This is not your typical declarative action -- in fact, a one-off backup is imperative in nature! -- but it is possible to take a one-off backup of your Postgres cluster with PGO. - -First, to configure your spec to take a one-off backup, you will need to edit the `spec.backups.pgbackrest.manual` section of your custom resource. This will contain information about the type of backup you want to take and any other [pgBackRest configuration](https://pgbackrest.org/configuration.html) options. - -Let's configure the custom resource to take a one-off full backup: - -``` -spec: - backups: - pgbackrest: - manual: - repoName: repo1 - options: - - --type=full -``` - -This does not trigger the one-off backup -- you have to do that by adding the `postgres-operator.crunchydata.com/pgbackrest-backup` annotation to your custom resource. The best way to set this annotation is with a timestamp, so you know when you initialized the backup. - -For example, for our `hippo` cluster, we can run the following command to trigger the one-off backup: - -``` -kubectl annotate -n postgres-operator postgrescluster hippo \ - postgres-operator.crunchydata.com/pgbackrest-backup="$( date '+%F_%H:%M:%S' )" -``` - -PGO will detect this annotation and create a new, one-off backup Job! - -If you intend to take one-off backups with similar settings in the future, you can leave those in the spec; just update the annotation to a different value the next time you are taking a backup.
- -To re-run the command above, you will need to add the `--overwrite` flag so the annotation's value can be updated, i.e. - -``` -kubectl annotate -n postgres-operator postgrescluster hippo --overwrite \ - postgres-operator.crunchydata.com/pgbackrest-backup="$( date '+%F_%H:%M:%S' )" -``` - -## Next Steps - -We've covered the fundamental tasks of managing backups. What about [restores]({{< relref "./disaster-recovery.md" >}})? Or [cloning data into new Postgres clusters]({{< relref "./disaster-recovery.md" >}})? Let's explore! diff --git a/docs/content/tutorial/backups.md b/docs/content/tutorial/backups.md deleted file mode 100644 index 1581cf584c..0000000000 --- a/docs/content/tutorial/backups.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -title: "Backup Configuration" -date: -draft: false -weight: 80 ---- - -An important part of a healthy Postgres cluster is maintaining backups. PGO optimizes its use of open source [pgBackRest](https://pgbackrest.org/) to be able to support terabyte-sized databases. What's more, PGO makes it convenient to perform many common and advanced actions that can occur during the lifecycle of a database, including: - -- Setting automatic backup schedules and retention policies -- Backing data up to multiple locations - - Support for backup storage in Kubernetes, AWS S3 (or S3-compatible systems like MinIO), Google Cloud Storage (GCS), and Azure Blob Storage -- Taking one-off / ad hoc backups -- Performing a "point-in-time recovery" -- Cloning data to a new instance - -and more. - -Let's explore how the various disaster recovery features work in PGO by first looking at how to set up backups. - -## Understanding Backup Configuration and Basic Operations - -The backup configuration for a PGO managed Postgres cluster resides in the `spec.backups.pgbackrest` section of a custom resource. In addition to indicating which version of pgBackRest to use, this section allows you to configure the fundamental backup settings for your Postgres cluster, including: - -- `spec.backups.pgbackrest.configuration` - allows you to add additional configuration and references to Secrets that are needed for configuring your backups. For example, this may reference a Secret that contains your S3 credentials. -- `spec.backups.pgbackrest.global` - a convenience to apply global [pgBackRest configuration](https://pgbackrest.org/configuration.html). An example of this may be setting the global pgBackRest logging level (e.g. `log-level-console: info`), or providing configuration to optimize performance. -- `spec.backups.pgbackrest.repos` - information on each specific pgBackRest backup repository. This allows you to configure where and how your backups are stored. You can keep backups in up to four (4) different locations! - -You can configure the `repos` section based on the backup storage system you are looking to use. Specifically, you configure your `repos` section according to the storage type you are using. There are four storage types available in `spec.backups.pgbackrest.repos`: - -| Storage Type | Description | -|--------------| ------------ | -| `azure` | For use with Azure Blob Storage. | -| `gcs` | For use with Google Cloud Storage (GCS). | -| `s3` | For use with Amazon S3 or any S3 compatible storage system such as MinIO. | -| `volume` | For use with a Kubernetes [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). | - - -Regardless of the backup storage system you select, you **must** assign a name to `spec.backups.pgbackrest.repos.name`, e.g. `repo1`.
pgBackRest follows the convention of assigning configuration to a specific repository using a `repoN` format, e.g. `repo1`, `repo2`, etc. You can customize your configuration based upon the name that you assign in the spec. We will cover this topic further in the multi-repository example. - -By default, backups are stored in a directory that follows the pattern `pgbackrest/repoN` where `N` is the number of the repo. This typically does not present issues when storing your backup information in a Kubernetes volume, but it can present complications if you are storing all of your backups in the same bucket of a blob storage system like S3/GCS/Azure. You can avoid conflicts by setting the `repoN-path` variable in `spec.backups.pgbackrest.global`. The convention we recommend for setting this variable is `/pgbackrest/$NAMESPACE/$CLUSTER_NAME/repoN`. For example, if I have a cluster named `hippo` in the namespace `postgres-operator`, I would set the following: - -``` -spec: - backups: - pgbackrest: - global: - repo1-path: /pgbackrest/postgres-operator/hippo/repo1 -``` - -As mentioned earlier, you can store backups in up to four different repositories. You can also mix and match, e.g. you could store your backups in two different S3 repositories. Each storage type does have its own required attributes that you need to set. We will cover that later in this section. - -Now that we've covered the basics, let's learn how to set up our backup repositories! - -## Setting Up a Backup Repository - -As mentioned above, PGO, the Postgres Operator from Crunchy Data, supports multiple ways to store backups. Let's look into each method and see how you can ensure your backups and archives are being safely stored! - -## Using Kubernetes Volumes - -The simplest way to get started storing backups is to use a Kubernetes Volume. This was already configured as part of the [create a Postgres cluster]({{< relref "./create-cluster.md">}}) example. Let's take a closer look at some of that configuration: - -``` -- name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -The one requirement of the `volume` storage type is that you need to fill out the `volumeClaimSpec` attribute. This attribute uses the same format as a [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) spec! In fact, we performed a similar setup when we [created a Postgres cluster]({{< relref "./create-cluster.md">}}). - -In the above example, we assume that the Kubernetes cluster is using a default storage class. If your cluster does not have a default storage class, or you wish to use a different storage class, you will have to set `spec.backups.pgbackrest.repos.volume.volumeClaimSpec.storageClassName`. - -## Using S3 - -Setting up backups in S3 requires a few additional modifications to your custom resource spec and the use of a Secret to protect your S3 credentials! - -There is an example for creating a Postgres cluster that uses S3 for backups in the `kustomize/s3` directory in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository. In this directory, there is a file called `s3.conf.example`. Copy this example file to `s3.conf`: - -``` -cp s3.conf.example s3.conf -``` - -Note that `s3.conf` is protected from commit by a `.gitignore`. - -Open up `s3.conf` and you will see something similar to: - -``` -[global] -repo1-s3-key= -repo1-s3-key-secret= -``` - -Replace the values with your AWS S3 credentials and save.
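As background, pgBackRest credentials like these are typically mounted into the cluster through `spec.backups.pgbackrest.configuration`, which accepts Kubernetes projected sources such as Secrets. Below is a hedged sketch of that wiring; the Secret name is a placeholder, not necessarily the exact name the Kustomize example generates:

```
spec:
  backups:
    pgbackrest:
      configuration:
      # reference a Secret whose data carries the repo1-s3-key values from s3.conf
      - secret:
          name: pgo-s3-creds
      global:
        # keep the repository path unique per cluster when sharing a bucket
        repo1-path: /pgbackrest/postgres-operator/hippo/repo1
```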
- -Now, open up `kustomize/s3/postgres.yaml`. In the `s3` section, you will see something similar to: - -``` -s3: - bucket: "" - endpoint: "" - region: "" -``` - -Again, replace these values with the values that match your S3 configuration. - -When your configuration is saved, you can deploy your cluster: - -``` -kubectl apply -k kustomize/s3 -``` - -Watch your cluster: you will see that your backups and archives are now being stored in S3! - -## Using Google Cloud Storage (GCS) - -Similar to S3, setting up backups in Google Cloud Storage (GCS) requires a few additional modifications to your custom resource spec and the use of a Secret to protect your GCS credentials. - -There is an example for creating a Postgres cluster that uses GCS for backups in the `kustomize/gcs` directory in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository. In order to configure this example to use GCS for backups, you will need to do two things. - -First, copy your GCS key secret (which is a JSON file) into `kustomize/gcs/gcs-key.json`. Note that a `.gitignore` directive prevents you from committing this file. - -Next, open the `postgres.yaml` file and edit `spec.backups.pgbackrest.repos.gcs.bucket` to the name of the GCS bucket that you want to back up to. - -Save this file, and then run: - -``` -kubectl apply -k kustomize/gcs -``` - -Watch your cluster: you will see that your backups and archives are now being stored in GCS! - -## Using Azure Blob Storage - -Similar to the above, setting up backups in Azure Blob Storage requires a few additional modifications to your custom resource spec and the use of a Secret to protect your Azure credentials. - -There is an example for creating a Postgres cluster that uses Azure for backups in the `kustomize/azure` directory in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository. In this directory, there is a file called `azure.conf.example`. Copy this example file to `azure.conf`: - -``` -cp azure.conf.example azure.conf -``` - -Note that `azure.conf` is protected from commit by a `.gitignore`. - -Open up `azure.conf` and you will see something similar to: - -``` -[global] -repo1-azure-account= -repo1-azure-key= -``` - -Replace the values with your Azure credentials and save. - -Now, open up `kustomize/azure/postgres.yaml`. In the `azure` section, you will see something similar to: - -``` -azure: - container: "" -``` - -Again, replace these values with the values that match your Azure configuration. - -When your configuration is saved, you can deploy your cluster: - -``` -kubectl apply -k kustomize/azure -``` - -Watch your cluster: you will see that your backups and archives are now being stored in Azure! - -## Set Up Multiple Backup Repositories - -It is possible to store backups in multiple locations! For example, you may want to keep your backups both within your Kubernetes cluster and S3. There are many reasons for doing this: - -- It is typically faster to heal Postgres instances when your backups are closer -- You can set different backup retention policies based upon your available storage -- You want to ensure that your backups are distributed geographically - -and more. - -PGO lets you store your backups in up to four locations simultaneously. You can mix and match: for example, you can store backups both locally and in GCS, or store your backups in two different GCS repositories. It's up to you!
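As an illustration, a hedged sketch of a two-repository layout, combining an in-cluster volume with an S3 repository, could look like the following; the bucket, endpoint, and region values are placeholders:

```
spec:
  backups:
    pgbackrest:
      global:
        # give each blob storage repository its own path to avoid collisions
        repo2-path: /pgbackrest/postgres-operator/hippo/repo2
      repos:
      - name: repo1
        volume:
          volumeClaimSpec:
            accessModes:
            - "ReadWriteOnce"
            resources:
              requests:
                storage: 1Gi
      - name: repo2
        s3:
          bucket: my-backup-bucket
          endpoint: s3.us-east-1.amazonaws.com
          region: us-east-1
```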
- -There is an example in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository in the `kustomize/multi-backup-repo` folder that sets up backups in four different locations using each storage type. You can modify this example to match your desired backup topology. - -### Additional Notes - -While storing Postgres archives (write-ahead log [WAL] files) occurs in parallel when saving data to multiple pgBackRest repos, you cannot take parallel backups to different repos at the same time. PGO will ensure that all backups are taken serially. Future work in pgBackRest will address parallel backups to different repos. Please don't confuse this with parallel backup: pgBackRest does allow for backups to use parallel processes when storing them to a single repo! - -## Custom Backup Configuration - -Most of your backup configuration can be configured through the `spec.backups.pgbackrest.global` attribute, or through information that you supply in the ConfigMap or Secret that you refer to in `spec.backups.pgbackrest.configuration`. You can also provide additional Secret values if need be, e.g. `repo1-cipher-pass` for encrypting backups. - -The full list of [pgBackRest configuration options](https://pgbackrest.org/configuration.html) is available here: - -[https://pgbackrest.org/configuration.html](https://pgbackrest.org/configuration.html) - -## Next Steps - -We've now seen how to use PGO to get our backups and archives set up and safely stored. Now let's take a look at [backup management]({{< relref "./backup-management.md" >}}) and how we can do things such as set backup frequency, set retention policies, and even take one-off backups! diff --git a/docs/content/tutorial/connect-cluster.md b/docs/content/tutorial/connect-cluster.md deleted file mode 100644 index ca8b4a66b8..0000000000 --- a/docs/content/tutorial/connect-cluster.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: "Connect to a Postgres Cluster" -date: -draft: false -weight: 30 ---- - -It's one thing to [create a Postgres cluster]({{< relref "./create-cluster.md" >}}); it's another thing to connect to it. Let's explore how PGO makes it possible to connect to a Postgres cluster! - -## Background: Services, Secrets, and TLS - -PGO creates a series of Kubernetes [Services](https://kubernetes.io/docs/concepts/services-networking/service/) to provide stable endpoints for connecting to your Postgres databases. These endpoints make it easy to provide a consistent way for your application to maintain connectivity to your data. To inspect what services are available, you can run the following command: - -``` -kubectl -n postgres-operator get svc --selector=postgres-operator.crunchydata.com/cluster=hippo -``` - -will yield something similar to: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -hippo-ha ClusterIP 10.103.73.92 5432/TCP 3h14m -hippo-ha-config ClusterIP None 3h14m -hippo-pods ClusterIP None 3h14m -hippo-primary ClusterIP None 5432/TCP 3h14m -hippo-replicas ClusterIP 10.98.110.215 5432/TCP 3h14m -``` - -You do not need to worry about most of these Services, as they are used to help manage the overall health of your Postgres cluster. For the purposes of connecting to your database, the Service of interest is called `hippo-primary`. Thanks to PGO, you do not need to even worry about that, as that information is captured within a Secret! - -When your Postgres cluster is initialized, PGO will bootstrap a database and Postgres user that your application can access. 
This information is stored in a Secret named with the pattern `<clusterName>-pguser-<userName>`. For our `hippo` cluster, this Secret is called `hippo-pguser-hippo`. This Secret contains the information you need to connect your application to your Postgres database: - -- `user`: The name of the user account. -- `password`: The password for the user account. -- `dbname`: The name of the database that the user has access to by default. -- `host`: The name of the host of the database. This references the [Service](https://kubernetes.io/docs/concepts/services-networking/service/) of the primary Postgres instance. -- `port`: The port that the database is listening on. -- `uri`: A [PostgreSQL connection URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) that provides all the information for logging into the Postgres database. - -All connections are over TLS. PGO provides its own certificate authority (CA) to allow you to securely connect your applications to your Postgres clusters. This allows you to use the [`verify-full` "SSL mode"](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS) of Postgres, which provides eavesdropping protection and prevents MITM attacks. You can also choose to bring your own CA, which is described later in this tutorial in the [Customize Cluster]({{< relref "./customize-cluster.md" >}}) section. - -## Connect an Application - -For this tutorial, we are going to connect [Keycloak](https://www.keycloak.org/), an open source identity management application. Keycloak can be deployed on Kubernetes and is backed by a Postgres database. While we provide an [example of deploying Keycloak](https://github.com/CrunchyData/postgres-operator-examples/tree/main/kustomize/keycloak) in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples) repository, we will use the sample manifest below to deploy the application: - -``` -cat <<EOF >> keycloak.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: keycloak - namespace: postgres-operator - labels: - app.kubernetes.io/name: keycloak -spec: - selector: - matchLabels: - app.kubernetes.io/name: keycloak - template: - metadata: - labels: - app.kubernetes.io/name: keycloak - spec: - containers: - - image: quay.io/keycloak/keycloak:latest - name: keycloak - env: - - name: DB_ADDR - valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: host } } - - name: DB_PORT - valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: port } } - - name: DB_DATABASE - valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: dbname } } - - name: DB_USER - valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: user } } - - name: DB_PASSWORD - valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: password } } - - name: KEYCLOAK_USER - value: "admin" - - name: KEYCLOAK_PASSWORD - value: "admin" - - name: PROXY_ADDRESS_FORWARDING - value: "true" - ports: - - name: http - containerPort: 8080 - - name: https - containerPort: 8443 - readinessProbe: - httpGet: - path: /auth/realms/master - port: 8080 - restartPolicy: Always - -EOF - -kubectl apply -f keycloak.yaml -``` - -Notice this part of the manifest: - -``` -- name: DB_ADDR - valueFrom: - secretKeyRef: - name: hippo-pguser-hippo - key: host -- name: DB_PORT - valueFrom: - secretKeyRef: - name: hippo-pguser-hippo - key: port -- name: DB_DATABASE - valueFrom: - secretKeyRef: - name: hippo-pguser-hippo - key: dbname -- name: DB_USER - valueFrom: - secretKeyRef: - name: hippo-pguser-hippo - key: user -- name: DB_PASSWORD - valueFrom:
- secretKeyRef: - name: hippo-pguser-hippo - key: password -``` - -The above manifest shows how all of these values are derived from the `hippo-pguser-hippo` Secret. This means that we do not need to know any of the connection credentials or have to insecurely pass them around -- they are made directly available to the application! - -Using this method, you can tie applications directly into your GitOps pipeline and connect them to Postgres without any prior knowledge of how PGO will deploy Postgres: all of the information your application needs is propagated into the Secret! - -## Next Steps - -Now that we have seen how to connect an application to a cluster, let's learn how to create a [high availability Postgres]({{< relref "./high-availability.md" >}}) cluster! diff --git a/docs/content/tutorial/connection-pooling.md b/docs/content/tutorial/connection-pooling.md deleted file mode 100644 index 075221b70f..0000000000 --- a/docs/content/tutorial/connection-pooling.md +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: "Connection Pooling" -date: -draft: false -weight: 100 ---- - -Connection pooling can be helpful for scaling and maintaining overall availability between your application and the database. PGO helps facilitate this by supporting the [PgBouncer](https://www.pgbouncer.org/) connection pooler and state manager. - -Let's look at how we can add a connection pooler and connect it to our application! - -## Adding a Connection Pooler - -Let's look at how we can add a connection pooler using the `kustomize/keycloak` example in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository. - -Connection poolers are added using the `spec.proxy` section of the custom resource. Currently, the only connection pooler supported is [PgBouncer](https://www.pgbouncer.org/). - -The only required attribute for adding a PgBouncer connection pooler is to set the `spec.proxy.pgBouncer.image` attribute. In the `kustomize/keycloak/postgres.yaml` file, add the following YAML to the spec: - -``` -proxy: - pgBouncer: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:{{< param centosBase >}}-1.15-2 -``` - -(You can also find an example of this in the `kustomize/examples/high-availability` example). - -Save your changes and run: - -``` -kubectl apply -k kustomize/keycloak -``` - -PGO will detect the change and create a new PgBouncer Deployment! - -That was fairly easy to set up, so now let's look at how we can connect our application to the connection pooler. - -## Connecting to a Connection Pooler - -When a connection pooler is deployed to the cluster, PGO adds additional information to the user Secrets to allow applications to connect directly to the connection pooler. Recall that in this example, our user Secret is called `keycloakdb-pguser-keycloakdb`. Describe the user Secret: - -``` -kubectl -n postgres-operator describe secrets keycloakdb-pguser-keycloakdb -``` - -You should see that there are several new attributes included in this Secret that allow you to connect to your Postgres instance via the connection pooler: - -- `pgbouncer-host`: The name of the host of the PgBouncer connection pooler. This references the [Service](https://kubernetes.io/docs/concepts/services-networking/service/) of the PgBouncer connection pooler. -- `pgbouncer-port`: The port that the PgBouncer connection pooler is listening on.
-- `pgbouncer-uri`: A [PostgreSQL connection URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) that provides all the information for logging into the Postgres database via the PgBouncer connection pooler. - -Open up the file in `kustomize/keycloak/keycloak.yaml`. Update the `DB_ADDR` and `DB_PORT` values to be the following: - -``` -- name: DB_ADDR - valueFrom: { secretKeyRef: { name: keycloakdb-pguser-keycloakdb, key: pgbouncer-host } } -- name: DB_PORT - valueFrom: { secretKeyRef: { name: keycloakdb-pguser-keycloakdb, key: pgbouncer-port } } -``` - -This changes Keycloak's configuration so that it will now connect through the connection pooler. - -Apply the changes: - -``` -kubectl apply -k kustomize/keycloak -``` - -Kubernetes will detect the changes and begin to deploy a new Keycloak Pod. When it is completed, Keycloak will now be connected to Postgres via the PgBouncer connection pooler! - -## TLS - -PGO deploys every cluster and component over TLS. This includes the PgBouncer connection pooler. If you are using your own [custom TLS setup]({{< relref "./customize-cluster.md" >}}#customize-tls), you will need to provide a Secret reference for a TLS key / certificate pair for PgBouncer in `spec.proxy.pgBouncer.customTLSSecret`. - -Your TLS certificate for PgBouncer should have a Common Name (CN) setting that matches the PgBouncer Service name. This is the name of the cluster suffixed with `-pgbouncer`. For example, for our `hippo` cluster this would be `hippo-pgbouncer`. For the `keycloakdb` example, it would be `keycloakdb-pgbouncer`. - -To customize the TLS for PgBouncer, you will need to create a Secret in the Namespace of your Postgres cluster that contains the TLS key (`tls.key`), TLS certificate (`tls.crt`) and the CA certificate (`ca.crt`) to use. The Secret should contain the following values: - -``` -data: - ca.crt: - tls.crt: - tls.key: -``` - -For example, if you have files named `ca.crt`, `keycloakdb-pgbouncer.key`, and `keycloakdb-pgbouncer.crt` stored on your local machine, you could run the following command: - -``` -kubectl create secret generic -n postgres-operator keycloakdb-pgbouncer.tls \ - --from-file=ca.crt=ca.crt \ - --from-file=tls.key=keycloakdb-pgbouncer.key \ - --from-file=tls.crt=keycloakdb-pgbouncer.crt -``` - -You can specify the custom TLS Secret in the `spec.proxy.pgBouncer.customTLSSecret.name` field in your `postgrescluster.postgres-operator.crunchydata.com` custom resource, e.g.: - -``` -spec: - proxy: - pgBouncer: - customTLSSecret: - name: keycloakdb-pgbouncer.tls -``` - -## Customizing - -The PgBouncer connection pooler is highly customizable, both from a configuration and Kubernetes deployment standpoint. Let's explore some of the customizations that you can do! - -### Configuration - -[PgBouncer configuration](https://www.pgbouncer.org/config.html) can be customized through `spec.proxy.pgBouncer.config`. After making configuration changes, PGO will roll them out to any PgBouncer instance and automatically issue a "reload". - -There are several ways you can customize the configuration: - -- `spec.proxy.pgBouncer.config.global`: Accepts key-value pairs that apply changes globally to PgBouncer. -- `spec.proxy.pgBouncer.config.databases`: Accepts key-value pairs that represent PgBouncer [database definitions](https://www.pgbouncer.org/config.html#section-databases). 
-- `spec.proxy.pgBouncer.config.users`: Accepts key-value pairs that represent [connection settings applied to specific users](https://www.pgbouncer.org/config.html#section-users). -- `spec.proxy.pgBouncer.config.files`: Accepts a list of files that are mounted in the `/etc/pgbouncer` directory and loaded before any other options are considered using PgBouncer's [include directive](https://www.pgbouncer.org/config.html#include-directive). - -For example, to set the connection pool mode to `transaction`, you would set the following configuration: - -``` -spec: - proxy: - pgBouncer: - config: - global: - pool_mode: transaction -``` - -For a reference on [PgBouncer configuration](https://www.pgbouncer.org/config.html) please see: - -[https://www.pgbouncer.org/config.html](https://www.pgbouncer.org/config.html) - -### Replicas - -PGO deploys one PgBouncer instance by default. You may want to run multiple PgBouncer instances to have some level of redundancy, though you still want to be mindful of how many connections are going to your Postgres database! - -You can manage the number of PgBouncer instances that are deployed through the `spec.proxy.pgBouncer.replicas` attribute. - -### Resources - -You can manage the CPU and memory resources given to a PgBouncer instance through the `spec.proxy.pgBouncer.resources` attribute. The layout of `spec.proxy.pgBouncer.resources` should be familiar: it follows the same pattern as the standard Kubernetes structure for setting [container resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). - -For example, let's say we want to set some CPU and memory limits on our PgBouncer instances. We could add the following configuration: - -``` -spec: - proxy: - pgBouncer: - resources: - limits: - cpu: 200m - memory: 128Mi -``` - -As PGO deploys the PgBouncer instances using a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) these changes are rolled out using a rolling update to minimize disruption between your application and Postgres instances! - -### Annotations / Labels - -You can apply custom annotations and labels to your PgBouncer instances through the `spec.proxy.pgBouncer.metadata.annotations` and `spec.proxy.pgBouncer.metadata.labels` attributes respectively. Note that any changes to either of these two attributes take precedence over any other custom labels you have added. - -### Pod Anti-Affinity / Pod Affinity / Node Affinity - -You can control the [pod anti-affinity, pod affinity, and node affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) through the `spec.proxy.pgBouncer.affinity` attribute, specifically: - -- `spec.proxy.pgBouncer.affinity.nodeAffinity`: controls node affinity for the PgBouncer instances. -- `spec.proxy.pgBouncer.affinity.podAffinity`: controls Pod affinity for the PgBouncer instances. -- `spec.proxy.pgBouncer.affinity.podAntiAffinity`: controls Pod anti-affinity for the PgBouncer instances. - -Each of the above follows the [standard Kubernetes specification for setting affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity). 
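As an illustration of the node affinity case, a hedged sketch that prefers scheduling PgBouncer onto nodes carrying a particular label might look like this; the label key and value are placeholders:

```
spec:
  proxy:
    pgBouncer:
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            preference:
              matchExpressions:
              # placeholder label; use whatever label your nodes actually carry
              - key: workload-type
                operator: In
                values:
                - connection-poolers
```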
- -For example, to set a preferred Pod anti-affinity rule for the `kustomize/keycloak` example, you would want to add the following to your configuration: - -``` -spec: - proxy: - pgBouncer: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - labelSelector: - matchLabels: - postgres-operator.crunchydata.com/cluster: keycloakdb - postgres-operator.crunchydata.com/role: pgbouncer - topologyKey: kubernetes.io/hostname -``` - -### Tolerations - -You can deploy PgBouncer instances to [Nodes with Taints](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) by setting [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) through `spec.proxy.pgBouncer.tolerations`. This attribute follows the Kubernetes standard tolerations layout. - -For example, if there were a set of Nodes with a Taint of `role=connection-poolers:NoSchedule` that you want to schedule your PgBouncer instances to, you could apply the following configuration: - -``` -spec: - proxy: - pgBouncer: - tolerations: - - effect: NoSchedule - key: role - operator: Equal - value: connection-poolers -``` - -Note that setting a toleration does not necessarily mean that the PgBouncer instances will be assigned to Nodes with those taints. [Tolerations act as a **key**: they allow for you to access Nodes](https://blog.crunchydata.com/blog/kubernetes-pod-tolerations-and-postgresql-deployment-strategies). If you want to ensure that your PgBouncer instances are deployed to specific nodes, you need to combine setting tolerations with node affinity. - -## Next Steps - -Now that we can enable connection pooling in a cluster, let’s explore some [administrative tasks]({{< relref "administrative-tasks.md" >}}) such as manually restarting PostgreSQL using PGO. How do we do that? diff --git a/docs/content/tutorial/create-cluster.md b/docs/content/tutorial/create-cluster.md deleted file mode 100644 index 53ad975d6a..0000000000 --- a/docs/content/tutorial/create-cluster.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "Create a Postgres Cluster" -date: -draft: false -weight: 20 ---- - -If you came here through the [quickstart]({{< relref "quickstart/_index.md" >}}), you may have already created a cluster. If you created a cluster by using the example in the `kustomize/postgres` directory, feel free to skip to connecting to a cluster, or read onward for a more in depth look into cluster creation! - -## Create a Postgres Cluster - -Creating a Postgres cluster is pretty simple. Using the example in the `kustomize/postgres` directory, all we have to do is run: - -``` -kubectl apply -k kustomize/postgres -``` - -and PGO will create a simple Postgres cluster named `hippo` in the `postgres-operator` namespace. You can track the status of your Postgres cluster using `kubectl describe` on the `postgresclusters.postgres-operator.crunchydata.com` custom resource: - -``` -kubectl -n postgres-operator describe postgresclusters.postgres-operator.crunchydata.com hippo -``` - -and you can track the state of the Postgres Pod using the following command: - -``` -kubectl -n postgres-operator get pods \ - --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance -``` - -### What Just Happened? - -PGO created a Postgres cluster based on the information provided to it in the Kustomize manifests located in the `kustomize/postgres` directory. 
Let's better understand what happened by inspecting the `kustomize/postgres/postgres.yaml` file: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 - postgresVersion: 13 - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2 - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -When we ran the `kubectl apply` command earlier, what we did was create a `PostgresCluster` custom resource in Kubernetes. PGO detected that we added a new `PostgresCluster` resource and started to create all the objects needed to run Postgres in Kubernetes! - -What else happened? PGO read the value from `metadata.name` to provide the Postgres cluster with the name `hippo`. Additionally, PGO knew which containers to use for Postgres and pgBackRest by looking at the values in `spec.image` and `spec.backups.pgbackrest.image` respectively. The value in `spec.postgresVersion` is important as it will help PGO track which major version of Postgres you are using. - -PGO knows how many Postgres instances to create through the `spec.instances` section of the manifest. While `name` is optional, we opted to give it the name `instance1`. We could have also created multiple replicas and instances during cluster initialization, but we will cover that more when we discuss how to [scale and create a HA Postgres cluster]({{< relref "./high-availability.md" >}}). - -A very important piece of your `PostgresCluster` custom resource is the `dataVolumeClaimSpec` section. This describes the storage that your Postgres instance will use. It is modeled after the [Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). If you do not provide a `spec.instances.dataVolumeClaimSpec.storageClassName`, then the default storage class in your Kubernetes environment is used. - -As part of creating a Postgres cluster, we also specify information about our backup archive. PGO uses [pgBackRest](https://pgbackrest.org/), an open source backup and restore tool designed to handle terabyte-scale backups. As part of initializing our cluster, we can specify where we want our backups and archives ([write-ahead logs or WAL](https://www.postgresql.org/docs/current/wal-intro.html)) stored. We will talk about this portion of the `PostgresCluster` spec in greater depth in the [disaster recovery]({{< relref "./backups.md" >}}) section of this tutorial, and also see how we can store backups in Amazon S3, Google GCS, and Azure Blob Storage. - -## Troubleshooting - -### PostgreSQL / pgBackRest Pods Stuck in `Pending` Phase - -The most common occurrence of this is due to PVCs not being bound. Ensure that you have set up your storage options correctly in any `volumeClaimSpec`. You can always update your settings and reapply your changes with `kubectl apply`. - -Also ensure that you have enough persistent volumes available: your Kubernetes administrator may need to provision more. - -If you are on OpenShift, you may need to set `spec.openshift` to `true`. - -### Backups Never Complete - -The most common occurrence of this is due to the Kubernetes network blocking SSH connections between Pods. 
Ensure that your Kubernetes networking layer allows for SSH connections over port 2022 in the Namespace that you are deploying your PostgreSQL clusters into. - -## Next Steps - -We're up and running -- now let's [connect to our Postgres cluster]({{< relref "./connect-cluster.md" >}})! diff --git a/docs/content/tutorial/customize-cluster.md b/docs/content/tutorial/customize-cluster.md deleted file mode 100644 index 60870d23a7..0000000000 --- a/docs/content/tutorial/customize-cluster.md +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: "Customize a Postgres Cluster" -date: -draft: false -weight: 60 ---- - -Postgres is known for its ease of customization; PGO helps you to roll out changes efficiently and without disruption. After [resizing the resources]({{< relref "./resize-cluster.md" >}}) for our Postgres cluster in the previous step of this tutorial, lets see how we can tweak our Postgres configuration to optimize its usage of them. - -## Custom Postgres Configuration - -Part of the trick of managing multiple instances in a Postgres cluster is ensuring all of the configuration changes are propagated to each of them. This is where PGO helps: when you make a Postgres configuration change for a cluster, PGO will apply the changes to all of the managed instances. - -For example, in our previous step we added CPU and memory limits of `2.0` and `4Gi` respectively. Let's tweak some of the Postgres settings to better use our new resources. We can do this in the `spec.patroni.dynamicConfiguration` section. Here is an example updated manifest that tweaks several settings: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 - postgresVersion: 13 - instances: - - name: instance1 - replicas: 2 - resources: - limits: - cpu: 2.0 - memory: 4Gi - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2 - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - patroni: - dynamicConfiguration: - postgresql: - parameters: - max_parallel_workers: 2 - max_worker_processes: 2 - shared_buffers: 1GB - work_mem: 2MB -``` - -In particular, we added the following to `spec`: - -``` -patroni: - dynamicConfiguration: - postgresql: - parameters: - max_parallel_workers: 2 - max_worker_processes: 2 - shared_buffers: 1GB - work_mem: 2MB -``` - -Apply these updates to your Kubernetes cluster with the following command: - -``` -kubectl apply -k kustomize/postgres -``` - -PGO will go and apply these settings to all of the Postgres clusters. You can verify that the changes are present using the Postgres `SHOW` command, e.g. - -``` -SHOW work_mem; -``` - -should yield something similar to: - -``` - work_mem ----------- - 2MB -``` - -## Customize TLS - -All connections in PGO use TLS to encrypt communication between components. PGO sets up a PKI and certificate authority (CA) that allow you create verifiable endpoints. However, you may want to bring a different TLS infrastructure based upon your organizational requirements. The good news: PGO lets you do this! - -If you want to use the TLS infrastructure that PGO provides, you can skip the rest of this section and move on to learning how to [apply software updates]({{< relref "./update-cluster.md" >}}). 
- -### How to Customize TLS - -There are a few different TLS endpoints that can be customized for PGO, including those of the Postgres cluster and controlling how Postgres instances authenticate with each other. Let's look at how we can customize TLS. - -Your TLS certificate should have a Common Name (CN) setting that matches the primary Service name. This is the name of the cluster suffixed with `-primary`. For example, for our `hippo` cluster this would be `hippo-primary`. - -To customize the TLS for a Postgres cluster, you will need to create a Secret in the Namespace of your Postgres cluster that contains the TLS key (`tls.key`), TLS certificate (`tls.crt`) and the CA certificate (`ca.crt`) to use. The Secret should contain the following values: - -``` -data: - ca.crt: - tls.crt: - tls.key: -``` - -For example, if you have files named `ca.crt`, `hippo.key`, and `hippo.crt` stored on your local machine, you could run the following command: - -``` -kubectl create secret generic -n postgres-operator hippo.tls \ - --from-file=ca.crt=ca.crt \ - --from-file=tls.key=hippo.key \ - --from-file=tls.crt=hippo.crt -``` - -You can specify the custom TLS Secret in the `spec.customTLSSecret.name` field in your `postgrescluster.postgres-operator.crunchydata.com` custom resource, e.g: - -``` -spec: - customTLSSecret: - name: hippo.tls -``` - -If you're unable to control the key-value pairs in the Secret, you can create a mapping that looks similar to this: - -``` -spec: - customTLSSecret: - name: hippo.tls - items: - - key: - path: tls.crt - - key: - path: tls.key - - key: - path: ca.crt -``` - -If `spec.customTLSSecret` is provided you **must** also provide `spec.customReplicationTLSSecret` and both must contain the same `ca.crt`. - -As with the other changes, you can roll out the TLS customizations with `kubectl apply`. - -## Labels - -There are several ways to add your own custom Kubernetes [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) to your Postgres cluster. - -- Cluster: You can apply labels to any PGO managed object in a cluster by editing the `spec.metadata.labels` section of the custom resource. -- Postgres: You can apply labels to a Postgres instance set and its objects by editing `spec.instances.metadata.labels`. -- pgBackRest: You can apply labels to pgBackRest and its objects by editing `postgresclusters.spec.backups.pgbackrest.metadata.labels`. -- PgBouncer: You can apply labels to PgBouncer connection pooling instances by editing `spec.proxy.pgBouncer.metadata.labels`. - -## Annotations - -There are several ways to add your own custom Kubernetes [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to your Postgres cluster. - -- Cluster: You can apply annotations to any PGO managed object in a cluster by editing the `spec.metadata.annotations` section of the custom resource. -- Postgres: You can apply annotations to a Postgres instance set and its objects by editing `spec.instances.metadata.annotations`. -- pgBackRest: You can apply annotations to pgBackRest and its objects by editing `spec.backups.pgbackrest.metadata.annotations`. -- PgBouncer: You can apply annotations to PgBouncer connection pooling instances by editing `spec.proxy.pgBouncer.metadata.annotations`. - -## Separate WAL PVCs - -PostgreSQL commits transactions by storing changes in its [Write-Ahead Log (WAL)](https://www.postgresql.org/docs/current/wal-intro.html). 
Because the way WAL files are accessed and utilized often differs from that of data files, particularly in high-performance situations, it can be desirable to put WAL files on a separate storage volume. With PGO, this can be done by adding the `walVolumeClaimSpec` block to your desired instance in your PostgresCluster spec, either when your cluster is created or anytime thereafter: - -``` -spec: - instances: - - name: instance - walVolumeClaimSpec: - accessModes: - - "ReadWriteMany" - resources: - requests: - storage: 1Gi -``` - -This volume can be removed later by removing the `walVolumeClaimSpec` section from the instance. Note that when changing the WAL directory, care is taken so as not to lose any WAL files. PGO only deletes the PVC once there are no longer any WAL files on the previously configured volume. - -## Troubleshooting - -### Changes Not Applied - -If your Postgres configuration settings are not present, you may need to check a few things. First, ensure that you are using the syntax that Postgres expects. You can see this in the [Postgres configuration documentation](https://www.postgresql.org/docs/current/runtime-config.html). - -Some settings, such as `shared_buffers`, require Postgres to restart. Patroni only performs a reload when parameter changes are identified. Therefore, for parameters that require a restart, the restart can be performed manually by executing into a Postgres instance and running `patronictl restart --force <clusterName>-ha`. - -## Next Steps - -You've now seen how you can further customize your Postgres cluster, but what about [managing users and databases]({{< relref "./user-management.md" >}})? That's a great question that is answered in the [next section]({{< relref "./user-management.md" >}}). diff --git a/docs/content/tutorial/delete-cluster.md b/docs/content/tutorial/delete-cluster.md deleted file mode 100644 index e83fd65a95..0000000000 --- a/docs/content/tutorial/delete-cluster.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: "Delete a Postgres Cluster" -date: -draft: false -weight: 110 ---- - -There comes a time when it is necessary to delete your cluster. If you have been [following along with the example](https://github.com/CrunchyData/postgres-operator-examples), you can delete your Postgres cluster by simply running: - -``` -kubectl delete -k kustomize/postgres -``` - -PGO will remove all of the objects associated with your cluster. - -Data retention is subject to the [retention policy of your PVC](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming). For more information on how Kubernetes manages data retention, please refer to the [Kubernetes docs on volume reclaiming](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming). diff --git a/docs/content/tutorial/disaster-recovery.md b/docs/content/tutorial/disaster-recovery.md deleted file mode 100644 index 707a7a5998..0000000000 --- a/docs/content/tutorial/disaster-recovery.md +++ /dev/null @@ -1,306 +0,0 @@ ---- -title: "Disaster Recovery and Cloning" -date: -draft: false -weight: 85 ---- - -Perhaps someone accidentally dropped the `users` table. Perhaps you want to clone your production database to a step-down environment. Perhaps you want to exercise your disaster recovery system (and it is important that you do!). - -Regardless of scenario, it's important to know how you can perform a "restore" operation with PGO to be able to recover your data from a particular point in time, or clone a database for other purposes.
- -Let's look at how we can perform different types of restore operations. First, let's understand the core restore properties on the custom resource. - -## Restore Properties - -There are several attributes on the custom resource that are important to understand as part of the restore process. All of these attributes are grouped together in the `spec.dataSource.postgresCluster` section of the custom resource. - -Please review the list below to understand how each of these attributes works in the context of setting up a restore operation. - -- `spec.dataSource.postgresCluster.clusterName`: The name of the cluster that you are restoring from. This corresponds to the `metadata.name` attribute on a different `postgrescluster` custom resource. -- `spec.dataSource.postgresCluster.clusterNamespace`: The namespace of the cluster that you are restoring from. Used when the cluster exists in a different namespace. -- `spec.dataSource.postgresCluster.repoName`: The name of the pgBackRest repository from the `spec.dataSource.postgresCluster.clusterName` to use for the restore. Can be one of `repo1`, `repo2`, `repo3`, or `repo4`. The repository must exist in the other cluster. -- `spec.dataSource.postgresCluster.options`: Any additional [pgBackRest restore options](https://pgbackrest.org/command.html#command-restore) or general options you would like to pass in. For example, you may want to set `--process-max` to help improve performance on larger databases. -- `spec.dataSource.postgresCluster.resources`: Setting [resource limits and requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits) of the restore job can ensure that it runs efficiently. -- `spec.dataSource.postgresCluster.affinity`: Custom [Kubernetes affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) rules constrain the restore job so that it only runs on certain nodes. -- `spec.dataSource.postgresCluster.tolerations`: Custom [Kubernetes tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) allow the restore job to run on [tainted](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) nodes. - -Let's walk through some examples of how we can clone and restore our databases. - -## Clone a Postgres Cluster - -Let's create a clone of our [`hippo`]({{< relref "./create-cluster.md" >}}) cluster that we created previously. We know that our cluster is named `hippo` (based on its `metadata.name`) and that we only have a single backup repository called `repo1`. - -Let's call our new cluster `elephant`.
We can create a clone of the `hippo` cluster using a manifest like this: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: elephant -spec: - dataSource: - postgresCluster: - clusterName: hippo - repoName: repo1 - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 - postgresVersion: 13 - instances: - - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2 - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -Note this section of the spec: - -``` -spec: - dataSource: - postgresCluster: - clusterName: hippo - repoName: repo1 -``` - -This is the part that tells PGO to create the `elephant` cluster as an independent copy of the `hippo` cluster. - -The above is all you need to do to clone a Postgres cluster! PGO will work on creating a copy of your data on a new persistent volume claim (PVC) and work on initializing your cluster to spec. Easy! - -## Perform a Point-in-time-Recovery (PITR) - -Did someone drop the user table? You may want to perform a point-in-time-recovery (PITR) to revert your database back to a state before a change occurred. Fortunately, PGO can help you do that. - -You can set up a PITR using the [restore](https://pgbackrest.org/command.html#command-restore) command of [pgBackRest](https://www.pgbackrest.org), the backup management tool that powers the disaster recovery capabilities of PGO. You will need to set a few options on `spec.dataSource.postgresCluster.options` to perform a PITR. These options include: - -- `--type=time`: This tells pgBackRest to perform a PITR. -- `--target`: Where to perform the PITR to. Any example recovery target is `2021-06-09 14:15:11 EDT`. -- `--set` (optional): Choose which backup to start the PITR from. - -A few quick notes before we begin: - -- To perform a PITR, you must have a backup that is older than your PITR time. In other words, you can't perform a PITR back to a time where you do not have a backup! -- All relevant WAL files must be successfully pushed for the restore to complete correctly. -- Be sure to select the correct repository name containing the desired backup! - - -With that in mind, let's use the `elephant` example above. 
Let's say we want to perform a point-in-time-recovery (PITR) to `2021-06-09 14:15:11 EDT`, we can use the following manifest: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: elephant -spec: - dataSource: - postgresCluster: - clusterName: hippo - repoName: repo1 - options: - - --type=time - - --target="2021-06-09 14:15:11 EDT" - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 - postgresVersion: 13 - instances: - - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2 - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -The section to pay attention to is this: - -``` -spec: - dataSource: - postgresCluster: - clusterName: hippo - repoName: repo1 - options: --type=time --target="2021-06-09 14:15:11 EDT" -``` - -Notice how we put in the options to specify where to make the PITR. - -Using the above manifest, PGO will go ahead and create a new Postgres cluster that recovers its data up until `2021-06-09 14:15:11 EDT`. At that point, the cluster is promoted and you can start accessing your database from that specific point in time! - -## Perform an In-Place Point-in-time-Recovery (PITR) - -Similar to the PITR restore described above, you may want to perform a similar reversion back to a state before a change occurred, but without creating another PostgreSQL cluster. Fortunately, PGO can help you do this as well. - -You can set up a PITR using the [restore](https://pgbackrest.org/command.html#command-restore) command of [pgBackRest](https://www.pgbackrest.org), the backup management tool that powers the disaster recovery capabilities of PGO. You will need to set a few options on `spec.dataSource.postgresCluster.options` to perform a PITR. These options include: - -- `--type=time`: This tells pgBackRest to perform a PITR. -- `--target`: Where to perform the PITR to. Any example recovery target is `2021-06-09 14:15:11 EDT`. -- `--set` (optional): Choose which backup to start the PITR from. - -A few quick notes before we begin: - -- To perform a PITR, you must have a backup that is older than your PITR time. In other words, you can't perform a PITR back to a time where you do not have a backup! -- All relevant WAL files must be successfully pushed for the restore to complete correctly. -- Be sure to select the correct repository name containing the desired backup! - -To perform an in-place restore, users will first fill out the restore section of the spec as follows: - -``` -spec: - backups: - pgbackrest: - restore: - enabled: true - repoName: repo1 - options: - - --type=time - - --target="2021-06-09 14:15:11 EDT" -``` - -And to trigger the restore, you will then annotate the PostgresCluster as follows: - -``` -kubectl annotate postgrescluster hippo --overwrite \ - postgres-operator.crunchydata.com/pgbackrest-restore=id1 -``` - -And once the restore is complete, in-place restores can be disabled: - -``` -spec: - backups: - pgbackrest: - restore: - enabled: false -``` - -Notice how we put in the options to specify where to make the PITR. - -Using the above manifest, PGO will go ahead and re-create your Postgres cluster that will recover its data up until `2021-06-09 14:15:11 EDT`. 
At that point, the cluster is promoted and you can start accessing your database from that specific point in time! - - -## Standby Cluster - -Advanced high-availability and disaster recovery strategies involve spreading -your database clusters across multiple data centers to help maximize uptime. -In Kubernetes, this technique is known as "[federation](https://en.wikipedia.org/wiki/Federation_(information_technology))". -Federated Kubernetes clusters are able to communicate with each other, -coordinate changes, and provide resiliency for applications that have high -uptime requirements. - -As of this writing, federation in Kubernetes is still in ongoing development. -In the meantime, PGO provides a way to deploy Postgres clusters that can span -multiple Kubernetes clusters using an external storage system: - -- Amazon S3, or a system that uses the S3 protocol, -- Azure Blob Storage, or -- Google Cloud Storage - -Standby Postgres clusters are managed just like any other Postgres cluster in PGO. -For example, adding replicas to a standby cluster is matter of increasing the -`spec.instances.replicas` value. The main difference is that PostgreSQL data in -the cluster is read-only: one PostgreSQL instance is reading in the database -changes from an external repository while the other instances are replicas of it. -This is known as [cascading replication](https://www.postgresql.org/docs/current/warm-standby.html#CASCADING-REPLICATION). - -The following manifest defines a Postgres cluster that recovers from WAL files -stored in an S3 bucket: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo-standby -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 - postgresVersion: 13 - instances: - - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2 - repos: - - name: repo1 - s3: - bucket: "my-bucket" - endpoint: "s3.ca-central-1.amazonaws.com" - region: "ca-central-1" - standby: - enabled: true - repoName: repo1 -``` - -There comes a time where a standby cluster needs to be promoted to an active -cluster. Promoting a standby cluster means that a PostgreSQL instance within -it will start accepting both reads and writes. This has the net effect of -pushing WAL (transaction archives) to the pgBackRest repository, so we need to -take a few steps first to ensure we don't accidentally create a split-brain scenario. - -First, if this is not a disaster scenario, you will want to "shutdown" the -active PostgreSQL cluster. This can be done with the `spec.shutdown` attribute: - -``` -spec: - shutdown: true -``` - -The effect of this is that all the Kubernetes workloads for this cluster are -scaled to 0. You can verify this with the following command: - -``` -kubectl get deploy,sts,cronjob --selector=postgres-operator.crunchydata.com/cluster=hippo - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/hippo-pgbouncer 0/0 0 0 1h - -NAME READY AGE -statefulset.apps/hippo-00-lwgx 0/0 1h - -NAME SCHEDULE SUSPEND ACTIVE -cronjob.batch/hippo-pgbackrest-repo1-full @daily True 0 -``` - -We can then promote the standby cluster by removing or disabling its -`spec.standby` section: - -``` -spec: - standby: - enabled: false -``` - -This change triggers the promotion of the standby leader to a primary PostgreSQL -instance, and the cluster begins accepting writes. 
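If you prefer to make these two changes from the command line rather than by editing manifests, `kubectl patch` can apply them. This is a sketch only; the cluster names (`hippo` for the active cluster, `hippo-standby` for the standby) and the `postgres-operator` namespace are assumptions carried over from the examples above:

```
# Shut down the currently active cluster first to avoid a split-brain scenario.
kubectl -n postgres-operator patch postgrescluster/hippo --type merge \
  --patch '{"spec":{"shutdown":true}}'

# Then promote the standby cluster by disabling standby mode.
kubectl -n postgres-operator patch postgrescluster/hippo-standby --type merge \
  --patch '{"spec":{"standby":{"enabled":false}}}'
```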
- - -## Next Steps - -Now we've seen how to clone a cluster and perform a point-in-time-recovery, let's see how we can [monitor]({{< relref "./monitoring.md" >}}) our Postgres cluster to detect and prevent issues from occurring. diff --git a/docs/content/tutorial/getting-started.md b/docs/content/tutorial/getting-started.md deleted file mode 100644 index d34deb7701..0000000000 --- a/docs/content/tutorial/getting-started.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "Getting Started" -date: -draft: false -weight: 10 ---- - -If you have not done so, please install PGO by following the [quickstart]({{< relref "quickstart/_index.md" >}}#installation). - -As part of the installation, please be sure that you have done the following: - -1. [Forked the Postgres Operator examples repository](https://github.com/CrunchyData/postgres-operator-examples/fork) and cloned it to your host machine. -1. Installed PGO to the `postgres-operator` namespace. If you are inside your `postgres-operator-examples` directory, you can run the `kubectl apply -k kustomize/install` command. - -Throughout this tutorial, we will be building on the example provided in the `kustomize/postgres`. - -When referring to a nested object within a YAML manifest, we will be using the `.` format similar to `kubectl explain`. For example, if we want to refer to the deepest element in this yaml file: - -``` -spec: - hippos: - appetite: huge -``` - -we would say `spec.hippos.appetite`. - -`kubectl explain` is your friend. You can use `kubectl explain postgrescluster` to introspect the `postgrescluster.postgres-operator.crunchydata.com` custom resource definition. You can also review the [CRD reference]({{< relref "references/crd.md" >}}). - -With PGO, the Postgres Operator installed, let's go and [create a Postgres cluster]({{< relref "./create-cluster.md" >}})! diff --git a/docs/content/tutorial/high-availability.md b/docs/content/tutorial/high-availability.md deleted file mode 100644 index 7c6c211e25..0000000000 --- a/docs/content/tutorial/high-availability.md +++ /dev/null @@ -1,396 +0,0 @@ ---- -title: "High Availability" -date: -draft: false -weight: 40 ---- - -Postgres is known for its reliability: it is very stable and typically "just works." However, there are many things that can happen in a distributed environment like Kubernetes that can affect Postgres uptime, including: - -- The database storage disk fails or some other hardware failure occurs -- The network on which the database resides becomes unreachable -- The host operating system becomes unstable and crashes -- A key database file becomes corrupted -- A data center is lost -- A Kubernetes component (e.g. a Service) is accidentally deleted - -There may also be downtime events that are due to the normal case of operations, such as performing a minor upgrade, security patching of operating system, hardware upgrade, or other maintenance. - -The good news: PGO is prepared for this, and your Postgres cluster is protected from many of these scenarios. However, to maximize your high availability (HA), let's first scale up your Postgres cluster. - -## HA Postgres: Adding Replicas to your Postgres Cluster - -PGO provides several ways to add replicas to make a HA cluster: - -- Increase the `spec.instances.replicas` value -- Add an additional entry in `spec.instances` - -For the purposes of this tutorial, we will go with the first method and set `spec.instances.replicas` to `2`. 
Your manifest should look similar to: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 - postgresVersion: 13 - instances: - - name: instance1 - replicas: 2 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2 - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -Apply these updates to your Kubernetes cluster with the following command: - -``` -kubectl apply -k kustomize/postgres -``` - -Within moment, you should see a new Postgres instance initializing! You can see all of your Postgres Pods for the `hippo` cluster by running the following command: - -``` -kubectl -n postgres-operator get pods \ - --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance-set -``` - -Let's test our high availability set up. - -## Testing Your HA Cluster - -An important part of building a resilient Postgres environment is testing its resiliency, so let's run a few tests to see how PGO performs under pressure! - -### Test #1: Remove a Service - -Let's try removing the primary Service that our application is connected to. This test does not actually require a HA Postgres cluster, but it will demonstrate PGO's ability to react to environmental changes and heal things to ensure your applications can stay up. - -Recall in the [connecting a Postgres cluster]({{< relref "./connect-cluster.md" >}}) that we observed the Services that PGO creates, e.g: - -``` -kubectl -n postgres-operator get svc \ - --selector=postgres-operator.crunchydata.com/cluster=hippo -``` - -yields something similar to: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -hippo-ha ClusterIP 10.103.73.92 5432/TCP 4h8m -hippo-ha-config ClusterIP None 4h8m -hippo-pods ClusterIP None 4h8m -hippo-primary ClusterIP None 5432/TCP 4h8m -hippo-replicas ClusterIP 10.98.110.215 5432/TCP 4h8m -``` - -We also mentioned that the application is connected to the `hippo-primary` Service. What happens if we were to delete this Service? - -``` -kubectl -n postgres-operator delete svc hippo-primary -``` - -This would seem like it could create a downtime scenario, but run the above selector again: - -``` -kubectl -n postgres-operator get svc \ - --selector=postgres-operator.crunchydata.com/cluster=hippo -``` - -You should see something similar to: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -hippo-ha ClusterIP 10.103.73.92 5432/TCP 4h8m -hippo-ha-config ClusterIP None 4h8m -hippo-pods ClusterIP None 4h8m -hippo-primary ClusterIP None 5432/TCP 3s -hippo-replicas ClusterIP 10.98.110.215 5432/TCP 4h8m -``` - -Wow -- PGO detected that the primary Service was deleted and it recreated it! Based on how your application connects to Postgres, it may not have even noticed that this event took place! - -Now let's try a more extreme downtime event. - -### Test #2: Remove the Primary StatefulSet - -[StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) are a Kubernetes object that provide helpful mechanisms for managing Pods that interface with stateful applications, such as databases. They provide a stable mechanism for managing Pods to help ensure data is retrievable in a predictable way. 
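Before running this test, it helps to see which Pod currently holds which role. One way is to show the role label as an extra column; a sketch using the labels PGO applies to the `hippo` cluster:

```
kubectl -n postgres-operator get pods \
  --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance \
  -L postgres-operator.crunchydata.com/role
```

The Pod whose `role` column reads `master` is the current primary.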
- -What happens if we remove the StatefulSet that manages the Pod that represents the Postgres primary? First, let's determine which Pod is the primary. We'll store it in an environment variable for convenience. - -``` -PRIMARY_POD=$(kubectl -n postgres-operator get pods \ - --selector=postgres-operator.crunchydata.com/role=master \ - -o jsonpath='{.items[*].metadata.labels.postgres-operator\.crunchydata\.com/instance}') -``` - -Inspect the environment variable to see which Pod is the current primary: - -``` -echo $PRIMARY_POD -``` - -should yield something similar to: - -``` -hippo-instance1-zj5s -``` - -We can use the value above to delete the StatefulSet associated with the current Postgres primary instance: - -``` -kubectl delete sts -n postgres-operator "${PRIMARY_POD}" -``` - -Let's see what happens. Try getting all of the StatefulSets for the Postgres instances in the `hippo` cluster: - -``` -kubectl get sts -n postgres-operator \ - --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance -``` - -You should see something similar to: - -``` -NAME READY AGE -hippo-instance1-6kbw 1/1 15m -hippo-instance1-zj5s 0/1 1s -``` - -PGO recreated the StatefulSet that was deleted! After this "catastrophic" event, PGO proceeds to heal the Postgres instance so it can rejoin the cluster. We cover the high availability process in greater depth later in the documentation. - -What about the other instance? We can see that it became the new primary through the following command: - -``` -kubectl -n postgres-operator get pods \ - --selector=postgres-operator.crunchydata.com/role=master \ - -o jsonpath='{.items[*].metadata.labels.postgres-operator\.crunchydata\.com/instance}' -``` - -which should yield something similar to: - -``` -hippo-instance1-6kbw -``` - -You can test that the failover successfully occurred in a few ways. You can connect to the example Keycloak application that we [deployed in the previous section]({{< relref "./connect-cluster.md" >}}). Based on Keycloak's connection retry logic, you may need to wait a moment for it to reconnect, but you will see it reconnect and resume reading and writing data. You can also connect to the Postgres instance directly and execute the following command: - -``` -SELECT NOT pg_catalog.pg_is_in_recovery() is_primary; -``` - -If it returns `true` (or `t`), then the Postgres instance is a primary! - -What if PGO was down during the downtime event? Failover would still occur: the Postgres HA system works independently of PGO and can maintain its own uptime. PGO will still need to assist with some of the healing aspects, but your application will still maintain read/write connectivity to your Postgres cluster! - -## Affinity - -[Kubernetes affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) rules, which include Pod anti-affinity and Node affinity, can help you to define where you want your workloads to reside. Pod anti-affinity is important for high availability: when used correctly, it ensures that your Postgres instances are distributed amongst different Nodes. Node affinity can be used to assign instances to specific Nodes, e.g. to utilize hardware that's optimized for databases. - -### Understanding Pod Labels - -PGO sets up several labels for Postgres cluster management that can be used for Pod anti-affinity or affinity rules in general. These include: - -- `postgres-operator.crunchydata.com/cluster`: This is assigned to all managed Pods in a Postgres cluster.
The value of this label is the name of your Postgres cluster, in this case: `hippo`. -- `postgres-operator.crunchydata.com/instance-set`: This is assigned to all Postgres instances within a group of `spec.instances`. In the example above, the value of this label is `instance1`. If you do not assign a label, the value is automatically set by PGO using a `NN` format, e.g. `00`. -- `postgres-operator.crunchydata.com/instance`: This is a unique label assigned to each Postgres instance containing the name of the Postgres instance. - -Let's look at how we can set up affinity rules for our Postgres cluster to help improve high availability. - -### Pod Anti-affinity - -Kubernetes has two types of Pod anti-affinity: - -- Preferred: With preferred (`preferredDuringSchedulingIgnoredDuringExecution`) Pod anti-affinity, Kubernetes will make a best effort to schedule Pods matching the anti-affinity rules to different Nodes. However, if it is not possible to do so, then Kubernetes may schedule one or more Pods to the same Node. -- Required: With required (`requiredDuringSchedulingIgnoredDuringExecution`) Pod anti-affinity, Kubernetes mandates that each Pod matching the anti-affinity rules **must** be scheduled to different Nodes. However, a Pod may not be scheduled if Kubernetes cannot find a Node that does not contain a Pod matching the rules. - -There is a tradeoff with these two types of pod anti-affinity: while "required" anti-affinity will ensure that all the matching Pods are scheduled on different Nodes, if Kubernetes cannot find an available Node, your Postgres instance may not be scheduled. Likewise, while "preferred" anti-affinity will make a best effort to scheduled your Pods on different Nodes, Kubernetes may compromise and schedule more than one Postgres instance of the same cluster on the same Node. - -By understanding these tradeoffs, the makeup of your Kubernetes cluster, and your requirements, you can choose the method that makes the most sense for your Postgres deployment. We'll show examples of both methods below! - -#### Using Preferred Pod Anti-Affinity - -First, let's deploy our Postgres cluster with preferred Pod anti-affinity. Note that if you have a single-node Kubernetes cluster, you will not see your Postgres instances deployed to different nodes. However, your Postgres instances _will_ be deployed. - -We can set up our HA Postgres cluster with preferred Pod anti-affinity like so: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 - postgresVersion: 13 - instances: - - name: instance1 - replicas: 2 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - postgres-operator.crunchydata.com/cluster: hippo - postgres-operator.crunchydata.com/instance-set: instance1 - backups: - pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2 - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -Apply those changes in your Kubernetes cluster. 
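Once the Pods are running, you can check whether they actually landed on different Nodes; a sketch that prints each instance Pod alongside its Node:

```
kubectl -n postgres-operator get pods \
  --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance \
  -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName
```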
- -Let's take a closer look at this section: - -``` -affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - postgres-operator.crunchydata.com/cluster: hippo - postgres-operator.crunchydata.com/instance-set: instance1 -``` - -`spec.instances.affinity.podAntiAffinity` follows the standard Kubernetes [Pod anti-affinity spec](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). The values for the `matchLabels` are derived from what we described in the previous section: `postgres-operator.crunchydata.com/cluster` is set to our cluster name of `hippo`, and `postgres-operator.crunchydata.com/instance-set` is set to the instance set name of `instance1`. We choose a `topologyKey` of `kubernetes.io/hostname`, which is standard in Kubernetes clusters. - -Preferred Pod anti-affinity will perform a best effort to schedule your Postgres Pods to different nodes. Let's see how you can require your Postgres Pods to be scheduled to different nodes. - -#### Using Required Pod Anti-Affinity - -Required Pod anti-affinity forces Kubernetes to scheduled your Postgres Pods to different Nodes. Note that if Kubernetes is unable to schedule all Pods to different Nodes, some of your Postgres instances may become unavailable. - -Using the previous example, let's indicate to Kubernetes that we want to use required Pod anti-affinity for our Postgres clusters: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 - postgresVersion: 13 - instances: - - name: instance1 - replicas: 2 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - postgres-operator.crunchydata.com/cluster: hippo - postgres-operator.crunchydata.com/instance-set: instance1 - backups: - pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2 - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -Apply those changes in your Kubernetes cluster. - -If you are in a single Node Kubernetes clusters, you will notice that not all of your Postgres instance Pods will be scheduled. This is due to the `requiredDuringSchedulingIgnoredDuringExecution` preference. However, if you have enough Nodes available, you will see the Postgres instance Pods scheduled to different Nodes: - -``` -kubectl get pods -n postgres-operator -o wide \ - --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance -``` - -### Node Affinity - -Node affinity can be used to assign your Postgres instances to Nodes with specific hardware or to guarantee a Postgres instance resides in a specific zone. Node affinity can be set within the `spec.instances.affinity.nodeAffinity` attribute, following the standard Kubernetes [node affinity spec](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). - -Let's see an example with required Node affinity. Let's say we have a set of Nodes that are reserved for database usage that have a label `workload-role=db`. 
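If your Nodes do not carry that label yet, you can add it yourself; a quick sketch, where `node-1` is a hypothetical Node name:

```
# Label a Node so that the Node affinity rule below can match it.
kubectl label node node-1 workload-role=db
```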
We can create a Postgres cluster with a required Node affinity rule to scheduled all of the databases to those Nodes using the following configuration: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 - postgresVersion: 13 - instances: - - name: instance1 - replicas: 2 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: workload-role - operator: In - values: - - db - backups: - pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2 - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -## Next Steps - -We've now seen how PGO helps your application stay "always on" with your Postgres database. Now let's explore how PGO can minimize or eliminate downtime for operations that would normally cause that, such as [resizing your Postgres cluster]({{< relref "./resize-cluster.md" >}}). diff --git a/docs/content/tutorial/monitoring.md b/docs/content/tutorial/monitoring.md deleted file mode 100644 index 727b1f8f54..0000000000 --- a/docs/content/tutorial/monitoring.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "Monitoring" -date: -draft: false -weight: 90 ---- - -While having [high availability]({{< relref "tutorial/high-availability.md" >}}) and [disaster recovery]({{< relref "tutorial/disaster-recovery.md" >}}) systems in place helps in the event of something going wrong with your PostgreSQL cluster, monitoring helps you anticipate problems before they happen. Additionally, monitoring can help you diagnose and resolve issues that may cause degraded performance rather than downtime. - -Let's look at how PGO allows you to enable monitoring in your cluster. - -## Adding the Exporter Sidecar - -Let's look at how we can add the Crunchy PostgreSQL Exporter sidecar to your cluster using the `kustomize/postgres` example in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository. - -Monitoring tools are added using the `spec.monitoring` section of the custom resource. Currently, the only monitoring tool supported is the Crunchy PostgreSQL Exporter configured with [pgMonitor]. - -The only required attribute for adding the Exporter sidecar is to set `spec.monitoring.pgmonitor.exporter.image`. In the `kustomize/postgres/postgres.yaml` file, add the following YAML to the spec: - -``` -monitoring: - pgmonitor: - exporter: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-5.0.2-0 -``` - -Save your changes and run: - -``` -kubectl apply -k kustomize/postgres -``` - -PGO will detect the change and add the Exporter sidecar to all Postgres Pods that exist in your cluster. PGO will also do the work to allow the Exporter to connect to the database and gather metrics that can be accessed using the [PGO Monitoring] stack. - -## Accessing the Metrics - -Once the Crunchy PostgreSQL Exporter has been enabled in your cluster, follow the steps outlined in [PGO Monitoring] to install the monitoring stack. This will allow you to deploy a [pgMonitor] configuration of [Prometheus], [Grafana], and [Alertmanager] monitoring tools in Kubernetes. 
These tools will be set up by default to connect to the Exporter containers on your Postgres Pods. - -## Next Steps - -Now that we can monitor our cluster, let's explore how [connection pooling]({{< relref "connection-pooling.md" >}}) can be enabled using PGO and how it is helpful. - -[pgMonitor]: https://github.com/CrunchyData/pgmonitor -[Grafana]: https://grafana.com/ -[Prometheus]: https://prometheus.io/ -[Alertmanager]: https://prometheus.io/docs/alerting/latest/alertmanager/ -[PGO Monitoring]: {{< relref "installation/monitoring/_index.md" >}} diff --git a/docs/content/tutorial/resize-cluster.md b/docs/content/tutorial/resize-cluster.md deleted file mode 100644 index 8d125f96aa..0000000000 --- a/docs/content/tutorial/resize-cluster.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -title: "Resize a Postgres Cluster" -date: -draft: false -weight: 50 ---- - -You did it -- the application is a success! Traffic is booming, so much so that you need to add more resources to your Postgres cluster. However, you're worried that any resize operation may cause downtime and create a poor experience for your end users. - -This is where PGO comes in: PGO will help orchestrate rolling out any potentially disruptive changes to your cluster to minimize or eliminate any downtime for your application. To do so, we will assume that you have [deployed a high availability Postgres cluster]({{< relref "./high-availability.md" >}}) as described in the [previous section]({{< relref "./high-availability.md" >}}). - -Let's dive in. - -## Resize Memory and CPU - -Memory and CPU resources are important components for vertically scaling your Postgres cluster. Coupled with [tweaks to your Postgres configuration file]({{< relref "./customize-cluster.md" >}}), allowing your cluster to have more memory and CPU allotted to it can help it to perform better under load. - -It's important for instances in the same high availability set to have the same resources. PGO lets you adjust CPU and memory within the `spec.instances.resources` section of the `postgresclusters.postgres-operator.crunchydata.com` custom resource. The layout of `spec.instances.resources` should be familiar: it follows the same pattern as the standard Kubernetes structure for setting [container resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). - -For example, let's say we want to update our `hippo` Postgres cluster so that each instance has a limit of `2.0` CPUs and `4Gi` of memory.
We can make the following changes to the manifest: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 - postgresVersion: 13 - instances: - - name: instance1 - replicas: 2 - resources: - limits: - cpu: 2.0 - memory: 4Gi - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2 - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -In particular, we added the following to `spec.instances`: - -``` -resources: - limits: - cpu: 2.0 - memory: 4Gi -``` - -Apply these updates to your Kubernetes cluster with the following command: - -``` -kubectl apply -k kustomize/postgres -``` - -Now, let's watch how the rollout happens: - -``` -watch "kubectl -n postgres-operator get pods \ - --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance \ - -o=jsonpath='{range .items[*]}{.metadata.name}{\"\t\"}{.metadata.labels.postgres-operator\.crunchydata\.com/role}{\"\t\"}{.status.phase}{\"\t\"}{.spec.containers[].resources.limits}{\"\n\"}{end}'" -``` - -Observe how each Pod is terminated one-at-a-time. This is part of a "rolling update". Because updating the resources of a Pod is a destructive action, PGO first applies the CPU and memory changes to the replicas. PGO ensures that the changes are successfully applied to a replica instance before moving on to the next replica. - -Once all of the changes are applied, PGO will perform a "controlled switchover": it will promote a replica to become a primary, and apply the changes to the final Postgres instance. - -By rolling out the changes in this way, PGO ensures there is minimal to zero disruption to your application: you are able to successfully roll out updates and your users may not even notice! - -## Resize PVC - -Your application is a success! Your data continues to grow, and it's becoming apparently that you need more disk. That's great: you can resize your PVC directly on your `postgresclusters.postgres-operator.crunchydata.com` custom resource with minimal to zero downtime. - -PVC resizing, also known as [volume expansion](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims), is a function of your storage class: it must support volume resizing. Additionally, PVCs can only be **sized up**: you cannot shrink the size of a PVC. - -You can adjust PVC sizes on all of the managed storage instances in a Postgres instance that are using Kubernetes storage. These include: - -- `spec.instances.dataVolumeClaimSpec.resources.requests.storage`: The Postgres data directory (aka your database). -- `spec.backups.pgbackrest.repos.volume.volumeClaimSpec.resources.requests.storage`: The pgBackRest repository when using "volume" storage - -The above should be familiar: it follows the same pattern as the standard [Kubernetes PVC](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) structure. - -For example, let's say we want to update our `hippo` Postgres cluster so that each instance now uses a `10Gi` PVC and our backup repository uses a `20Gi` PVC. 
We can do so with the following markup: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 - postgresVersion: 13 - instances: - - name: instance1 - replicas: 2 - resources: - limits: - cpu: 2.0 - memory: 4Gi - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 10Gi - backups: - pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2 - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 20Gi -``` - -In particular, we added the following to `spec.instances`: - -``` -dataVolumeClaimSpec: - resources: - requests: - storage: 10Gi -``` - -and added the following to `spec.backups.pgbackrest.repos.volume`: - -``` -volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 20Gi -``` - -Apply these updates to your Kubernetes cluster with the following command: - -``` -kubectl apply -k kustomize/postgres -``` - -## Troubleshooting - -### Postgres Pod Can't Be Scheduled - -There are many reasons why a PostgreSQL Pod may not be scheduled: - -- **Resources are unavailable**. Ensure that you have a Kubernetes [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) with enough resources to satisfy your memory or CPU Request. -- **PVC cannot be provisioned**. Ensure that you request a PVC size that is available, or that your PVC storage class is set up correctly. - -### PVCs Do Not Resize - -Ensure that your storage class supports PVC resizing. You can check that by inspecting the `allowVolumeExpansion` attribute: - -``` -kubectl get sc -``` - -## Next Steps - -You've now resized your Postgres cluster, but how can you configure Postgres to take advantage of the new resources? Let's look at how we can [customize the Postgres cluster configuration]({{< relref "./customize-cluster.md" >}}). diff --git a/docs/content/tutorial/update-cluster.md b/docs/content/tutorial/update-cluster.md deleted file mode 100644 index d7055ff22e..0000000000 --- a/docs/content/tutorial/update-cluster.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "Apply Software Updates" -date: -draft: false -weight: 70 ---- - -Did you know that Postgres releases bug fixes [once every three months](https://www.postgresql.org/developer/roadmap/)? Additionally, we periodically refresh the container images to ensure the base images have the latest software that may fix some CVEs. - -It's generally good practice to keep your software up-to-date for stability and security purposes, so let's learn how PGO helps you apply low-risk, "patch" type updates. - -The good news: you do not need to update PGO itself to apply component updates: you can update each Postgres cluster whenever you want to apply the update! This lets you choose when you want to apply updates to each of your Postgres clusters, so you can update them on your own schedule. If you have a [high availability Postgres]({{< relref "./high-availability.md" >}}) cluster, PGO uses a rolling update to minimize or eliminate any downtime for your application.
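Before rolling out an update, it can help to confirm which images a cluster is currently running. A minimal sketch for the `hippo` cluster used throughout this tutorial:

```
kubectl -n postgres-operator get postgrescluster hippo \
  -o jsonpath='{.spec.image}{"\n"}{.spec.backups.pgbackrest.image}{"\n"}'
```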
- -## Applying Minor Postgres Updates - -The Postgres image is referenced using the `spec.image` field and looks similar to the below: - -``` -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 -``` - -Diving into the tag a bit further, you will notice the `13.4-0` portion. This represents the Postgres minor version (`13.4`) and the patch number of the release (`0`). If the patch number is incremented (e.g. `13.4-1`), this means that the container is rebuilt, but there are no changes to the Postgres version. If the minor version is incremented (e.g. `13.5-0`), this means that there is a newer bug fix release of Postgres within the container. - -To update the image, you just need to modify the `spec.image` field with the new image reference, e.g. - -``` -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 -``` - -You can apply the changes using `kubectl apply`. Similar to the rolling update example when we [resized the cluster]({{< relref "./resize-cluster.md" >}}), the update is first applied to the Postgres replicas, then a controlled switchover occurs, and the final instance is updated. - -For the `hippo` cluster, you can see the status of the rollout by running the command below: - -``` -kubectl -n postgres-operator get pods \ - --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance \ - -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.postgres-operator\.crunchydata\.com/role}{"\t"}{.status.phase}{"\t"}{.spec.containers[].image}{"\n"}{end}' -``` - -or by running a watch: - -``` -watch "kubectl -n postgres-operator get pods \ - --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance \ - -o=jsonpath='{range .items[*]}{.metadata.name}{\"\t\"}{.metadata.labels.postgres-operator\.crunchydata\.com/role}{\"\t\"}{.status.phase}{\"\t\"}{.spec.containers[].image}{\"\n\"}{end}'" -``` - -## Rolling Back Minor Postgres Updates - -This methodology also allows you to roll back changes from minor Postgres updates. You can change the `spec.image` field to your desired container image. PGO will then ensure each Postgres instance in the cluster rolls back to the desired image. - -## Applying Other Component Updates - -There are other components that go into a PGO Postgres cluster. These include pgBackRest, PgBouncer and others. Each one of these components has its own image: for example, you can find a reference to the pgBackRest image in the `spec.backups.pgbackrest.image` attribute. - -Applying software updates for the other components in a Postgres cluster works similarly to the above. As pgBackRest and PgBouncer are Kubernetes [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), Kubernetes will help manage the rolling update to minimize disruption. - -## Next Steps - -Now that we know how to update our software components, let's look at how PGO handles [disaster recovery]({{< relref "./backups.md" >}})! diff --git a/docs/content/tutorial/user-management.md b/docs/content/tutorial/user-management.md deleted file mode 100644 index ab4928e4e7..0000000000 --- a/docs/content/tutorial/user-management.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: "User / Database Management" -date: -draft: false -weight: 65 ---- - -PGO comes with some out-of-the-box conveniences for managing users and databases in your Postgres cluster.
However, you may have requirements where you need to create additional users, adjust user privileges, or add additional databases to your cluster. - -For detailed information about how user and database management works in PGO, please see the [User Management]({{< relref "architecture/user-management.md" >}}) section of the architecture guide. - -## Creating a New User - -You can create a new user with the following snippet in the `postgrescluster` custom resource. Let's add this to our `hippo` cluster: - -``` -spec: - users: - - name: rhino -``` - -You can now apply the changes and see that the new user is created. Note the following: - -- The user would only be able to connect to the default `postgres` database. -- The user will not have any connection credentials populated into the `hippo-pguser-rhino` Secret. -- The user is unprivileged. - -Let's create a new database named `zoo` that we will let the `rhino` user access: - -``` -spec: - users: - - name: rhino - databases: - - zoo -``` - -Inspect the `hippo-pguser-rhino` Secret. You should now see that the `dbname` and `uri` fields are now populated! - -We can set role privileges by using the standard [role attributes](https://www.postgresql.org/docs/current/role-attributes.html) that Postgres provides and adding them to the `spec.users.options`. Let's say we want `rhino` to become a superuser (be careful about doling out Postgres superuser privileges!). You can add the following to the spec: - -``` -spec: - users: - - name: rhino - databases: - - zoo - options: "SUPERUSER" -``` - -There you have it: we have created a Postgres user named `rhino` with superuser privileges that has access to the `zoo` database (though a superuser has access to all databases!). - -## Adjusting Privileges - -Let's say you want to revoke the superuser privilege from `rhino`. You can do so with the following: - -``` -spec: - users: - - name: rhino - databases: - - zoo - options: "NOSUPERUSER" -``` - -If you want to add multiple privileges, you can add each privilege with a space between them in `options`, e.g.: - -``` -spec: - users: - - name: rhino - databases: - - zoo - options: "CREATEDB CREATEROLE" -``` - -## Managing the `postgres` User - -By default, PGO does not give you access to the `postgres` user. However, you can get access to this account by doing the following: - -``` -spec: - users: - - name: postgres -``` - -This will create a Secret of the pattern `<clusterName>-pguser-postgres` that contains the credentials of the `postgres` account. For our `hippo` cluster, this would be `hippo-pguser-postgres`. - -## Deleting a User - -As mentioned earlier, PGO does not let you delete a user automatically: if you remove the user from the spec, it will still exist in your cluster. To remove a user and all of its objects, as a superuser you will need to run [`DROP OWNED`](https://www.postgresql.org/docs/current/sql-drop-owned.html) in each database the user has objects in, and [`DROP ROLE`](https://www.postgresql.org/docs/current/sql-droprole.html) -in your Postgres cluster. - -For example, with the above `rhino` user, you would run the following: - -``` -DROP OWNED BY rhino; -DROP ROLE rhino; -``` - -Note that you may need to run `DROP OWNED BY rhino CASCADE;` based upon your object ownership structure -- be very careful with this command! - -Once you have removed the user in the database, you can remove the user from the custom resource.
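If you prefer to run that cleanup SQL without connecting a separate client, one option is to exec into the current primary Pod. This is a sketch only: it assumes the Postgres container is named `database` and that `rhino` only owns objects in the `zoo` database.

```
# Find the current primary Pod using the role label from earlier in the tutorial.
PRIMARY=$(kubectl -n postgres-operator get pods \
  --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/role=master \
  -o name)

# Drop rhino's objects in the zoo database, then drop the role itself.
kubectl -n postgres-operator exec "${PRIMARY}" -c database \
  -- psql -d zoo -c 'DROP OWNED BY rhino;' -c 'DROP ROLE rhino;'
```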
- -## Deleting a Database - -As mentioned earlier, PGO does not let you delete a database automatically: if you remove all instances of the database from the spec, it will still exist in your cluster. To completely remove the database, you must run the [`DROP DATABASE`](https://www.postgresql.org/docs/current/sql-dropdatabase.html) -command as a Postgres superuser. - -For example, to remove the `zoo` database, you would execute the following: - -``` -DROP DATABASE zoo; -``` - -Once you have removed the database, you can remove any references to the database from the custom resource. - -## Next Steps - -You now know how to manage users and databases in your cluster and have now a well-rounded set of tools to support your "Day 1" operations. Let's start looking at some of the "Day 2" work you can do with PGO, such as [updating to the next Postgres version]({{< relref "./update-cluster.md" >}}), in the [next section]({{< relref "./update-cluster.md" >}}). diff --git a/docs/layouts/shortcodes/exporter_metrics.html b/docs/layouts/shortcodes/exporter_metrics.html deleted file mode 100644 index a69cd351a0..0000000000 --- a/docs/layouts/shortcodes/exporter_metrics.html +++ /dev/null @@ -1,17 +0,0 @@ -{{ range $metricsfile, $value0 := .Site.Data.pgmonitor.general }} -

{{ $metricsfile }}

- -{{ range $query, $value1 := $value0 }} -

{{ $query }}

-

SQL Query:

-{{ $value1.query }} - -

Metrics:

-{{ range $key2, $value2 := $value1.metrics }} -{{ range $metric, $value3 := $value2 }} -
{{ $metric }}
-{{ $value3.description }} -{{end}} -{{end}} -{{end}} -{{end}} diff --git a/docs/layouts/shortcodes/pgnodemx_metrics.html b/docs/layouts/shortcodes/pgnodemx_metrics.html deleted file mode 100644 index 919aadd428..0000000000 --- a/docs/layouts/shortcodes/pgnodemx_metrics.html +++ /dev/null @@ -1,17 +0,0 @@ -{{ range $metricsfile, $value0 := .Site.Data.pgmonitor.pgnodemx }} -

{{ $metricsfile }}

- -{{ range $query, $value1 := $value0 }} -

{{ $query }}

-

SQL Query:

-{{ $value1.query }} - -

Metrics:

-{{ range $key2, $value2 := $value1.metrics }} -{{ range $metric, $value3 := $value2 }} -
{{ $metric }}
-{{ $value3.description }} -{{end}} -{{end}} -{{end}} -{{end}} diff --git a/docs/static/Operator-Architecture-wCRDs.png b/docs/static/Operator-Architecture-wCRDs.png deleted file mode 100644 index 291cbefef3..0000000000 Binary files a/docs/static/Operator-Architecture-wCRDs.png and /dev/null differ diff --git a/docs/static/Operator-Architecture.png b/docs/static/Operator-Architecture.png deleted file mode 100644 index aa8a43a134..0000000000 Binary files a/docs/static/Operator-Architecture.png and /dev/null differ diff --git a/docs/static/Operator-DR-Storage.png b/docs/static/Operator-DR-Storage.png deleted file mode 100644 index 7bab1bc27c..0000000000 Binary files a/docs/static/Operator-DR-Storage.png and /dev/null differ diff --git a/docs/static/OperatorReferenceDiagram.1.png b/docs/static/OperatorReferenceDiagram.1.png deleted file mode 100644 index ed2b7164e6..0000000000 Binary files a/docs/static/OperatorReferenceDiagram.1.png and /dev/null differ diff --git a/docs/static/OperatorReferenceDiagram.png b/docs/static/OperatorReferenceDiagram.png deleted file mode 100644 index ed2b7164e6..0000000000 Binary files a/docs/static/OperatorReferenceDiagram.png and /dev/null differ diff --git a/docs/static/crunchy_logo.png b/docs/static/crunchy_logo.png deleted file mode 100644 index 2fbf3352c1..0000000000 Binary files a/docs/static/crunchy_logo.png and /dev/null differ diff --git a/docs/static/favicon.ico b/docs/static/favicon.ico deleted file mode 100644 index b30f559497..0000000000 Binary files a/docs/static/favicon.ico and /dev/null differ diff --git a/docs/static/favicon.png b/docs/static/favicon.png deleted file mode 100644 index 66ce2072e9..0000000000 Binary files a/docs/static/favicon.png and /dev/null differ diff --git a/docs/static/images/namespace-multi.png b/docs/static/images/namespace-multi.png deleted file mode 100644 index 8bb0c3bb1a..0000000000 Binary files a/docs/static/images/namespace-multi.png and /dev/null differ diff --git a/docs/static/images/namespace-own.png b/docs/static/images/namespace-own.png deleted file mode 100644 index d1f9bde948..0000000000 Binary files a/docs/static/images/namespace-own.png and /dev/null differ diff --git a/docs/static/images/namespace-single.png b/docs/static/images/namespace-single.png deleted file mode 100644 index a32d628388..0000000000 Binary files a/docs/static/images/namespace-single.png and /dev/null differ diff --git a/docs/static/images/pgadmin4-login.png b/docs/static/images/pgadmin4-login.png deleted file mode 100644 index 84c72ef692..0000000000 Binary files a/docs/static/images/pgadmin4-login.png and /dev/null differ diff --git a/docs/static/images/pgadmin4-login2.png b/docs/static/images/pgadmin4-login2.png deleted file mode 100644 index a75f990bfd..0000000000 Binary files a/docs/static/images/pgadmin4-login2.png and /dev/null differ diff --git a/docs/static/images/pgadmin4-query.png b/docs/static/images/pgadmin4-query.png deleted file mode 100644 index 5c0d306016..0000000000 Binary files a/docs/static/images/pgadmin4-query.png and /dev/null differ diff --git a/docs/static/images/postgresql-cluster-dr-base.png b/docs/static/images/postgresql-cluster-dr-base.png deleted file mode 100644 index 515e597500..0000000000 Binary files a/docs/static/images/postgresql-cluster-dr-base.png and /dev/null differ diff --git a/docs/static/images/postgresql-cluster-dr-schedule.png b/docs/static/images/postgresql-cluster-dr-schedule.png deleted file mode 100644 index 098c5e5658..0000000000 Binary files 
a/docs/static/images/postgresql-cluster-dr-schedule.png and /dev/null differ diff --git a/docs/static/images/postgresql-cluster-ha-s3.png b/docs/static/images/postgresql-cluster-ha-s3.png deleted file mode 100644 index 6922772d1a..0000000000 Binary files a/docs/static/images/postgresql-cluster-ha-s3.png and /dev/null differ diff --git a/docs/static/images/postgresql-cluster-restore-step-1.png b/docs/static/images/postgresql-cluster-restore-step-1.png deleted file mode 100644 index d8d2439fbd..0000000000 Binary files a/docs/static/images/postgresql-cluster-restore-step-1.png and /dev/null differ diff --git a/docs/static/images/postgresql-cluster-restore-step-2.png b/docs/static/images/postgresql-cluster-restore-step-2.png deleted file mode 100644 index cf6c653d54..0000000000 Binary files a/docs/static/images/postgresql-cluster-restore-step-2.png and /dev/null differ diff --git a/docs/static/images/postgresql-ha-multi-data-center.png b/docs/static/images/postgresql-ha-multi-data-center.png deleted file mode 100644 index bb3b18cf51..0000000000 Binary files a/docs/static/images/postgresql-ha-multi-data-center.png and /dev/null differ diff --git a/docs/static/images/postgresql-ha-overview.png b/docs/static/images/postgresql-ha-overview.png deleted file mode 100644 index bb74de6739..0000000000 Binary files a/docs/static/images/postgresql-ha-overview.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-alerts.png b/docs/static/images/postgresql-monitoring-alerts.png deleted file mode 100644 index 13f49f3fe1..0000000000 Binary files a/docs/static/images/postgresql-monitoring-alerts.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-backups.png b/docs/static/images/postgresql-monitoring-backups.png deleted file mode 100644 index de5530f552..0000000000 Binary files a/docs/static/images/postgresql-monitoring-backups.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-cluster.png b/docs/static/images/postgresql-monitoring-cluster.png deleted file mode 100644 index ea83ce4270..0000000000 Binary files a/docs/static/images/postgresql-monitoring-cluster.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-overview.png b/docs/static/images/postgresql-monitoring-overview.png deleted file mode 100644 index 8d623aa0f8..0000000000 Binary files a/docs/static/images/postgresql-monitoring-overview.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-pod.png b/docs/static/images/postgresql-monitoring-pod.png deleted file mode 100644 index 30e8183f54..0000000000 Binary files a/docs/static/images/postgresql-monitoring-pod.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-query-topn.png b/docs/static/images/postgresql-monitoring-query-topn.png deleted file mode 100644 index a09c25c6c9..0000000000 Binary files a/docs/static/images/postgresql-monitoring-query-topn.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-query-total.png b/docs/static/images/postgresql-monitoring-query-total.png deleted file mode 100644 index 8c9485ebe5..0000000000 Binary files a/docs/static/images/postgresql-monitoring-query-total.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-service.png b/docs/static/images/postgresql-monitoring-service.png deleted file mode 100644 index a24baf56a2..0000000000 Binary files a/docs/static/images/postgresql-monitoring-service.png and /dev/null differ diff --git 
a/docs/static/images/postgresql-monitoring.png b/docs/static/images/postgresql-monitoring.png deleted file mode 100644 index 96ed4017fc..0000000000 Binary files a/docs/static/images/postgresql-monitoring.png and /dev/null differ diff --git a/docs/static/operator-backrest-integration.png b/docs/static/operator-backrest-integration.png deleted file mode 100644 index 7d1f64b500..0000000000 Binary files a/docs/static/operator-backrest-integration.png and /dev/null differ diff --git a/docs/static/operator-backrest-integration.xml b/docs/static/operator-backrest-integration.xml deleted file mode 100644 index 7b27e5c83e..0000000000 --- a/docs/static/operator-backrest-integration.xml +++ /dev/null @@ -1 +0,0 @@ -7Vxbd6I6FP41PtrFXXysvc6szqqnnXU6c15mIURkioRCaGt//UkgQUKC2jG2Tqt9qNkJIdmXb18I9syT+fNF5qWzbzAAcc/QgueeedozDF1zNfyPUBYVZWBRQphFAR20JNxGL4BdSalFFICcG4ggjFGU8kQfJgnwEUfzsgw+8cOmMObvmnohEAi3vheL1LsoQDNK1Z3hsuMSROGM3to1BlXHxPPvwwwWCb1fzzCn5afqnntsLrrRfOYF8KlBMs965kkGIaq+zZ9PQEx4y9hWXXfe0VuvOwMJ2uQCo7rg0YsLuvXjzJ9FCDO1yDDlGHeOYY7CDIvD0K5TkHkIZvhrv2c43jztmaMQ32lE7hqS3eOBCDe+JAiEeGwEE7pTtGDcnUZxfAJjPA1pmtbpsX5u4zlylMF7wHoSmABCZBzSya0yL4jw1lpjpjBBVIl0C7e9OAoT3IjBlKwtTz0/SsKrsnVqa/SKxhKojMzRDM1jei+RlZS7jyBD4LlBoqy9AHAOULbAQ1gv0xdqBswKnpY65Zp01llDndg4j6pxWM+8FCX+QqUpl6ytuQLjQYC1nDZhhmYwhIkXny2po1J1AZlC47kRexMQj2rtbgsJeRk6JoaHaZMY+vffZ1HCOs6jmE0DkkA2DJMbg34DhBZUnl6BICYtV3sFYUrH8fpCNMlwrXOz7mGma7RURKIAw/LTUB0fSx3gzlHg5bOaIXiddBKLba7Rxpdkix9k6JHNmj/Zxp8j1OjCrZ90zgQ2BLBa83JYZD6V5a8ne2j51pd/ovHpt+LrjzNwPe7bOhU6XlkIENMEejkR/0q1zUCMLfaRB0GZEtJLxzBK0FLdTZtXd9O1+SmqVdGrWqpcL2Mj7f5+/t/Vv/Au9a+t88FLPrl4uUn7NkPVBpxxoET+QQxrbbvA5ox4dccjoxdvUg4gQkrJmstd2KOefSpTFAILEXYhx7RjHgVBaVMdptOEHVHitfkKaFN7Pbo8znPIUKivHelYwznRUNz/U4GzIXA6zcG2srSGtkRssPSOGCkqp1qkgtQE8w/KDzFu3sHUbOZ8CY8ROgd+pOVNchgXCGCHyICDUOuWwUSL/WNU+jmJMly1BtRKsVZ9JhAhOBe81yrsamIcWd0TceS3+HJCecKR2nK2EZ293IVap1eLk6qabrtHtuD3Bpro9lxze7dnDR1BmQTN4TCdt6aW22tp0sAmfytClZTMMX8OSVh8FPqpcRR7KSIea4SZijq5vLSBjfmsSbhqS7hqmtvZOkMRoyXWWqpKwaB2VqvktweW/9p4goaiik2NWRFzuPpQohT6UKIVjpIQU0wf5gs/LnIMaX3md/sZSKE6E8QhTjPG28QEJ1H4UACigU0jbGN3VjFnc+imIu30+6204xXALQsHNleORTc+WJYEH4ZK8IFHh/5SE9XCg24KqvQ2WU2TRnMV7cMnNDRpYQlMmbKUCYzapIW67LdIWrCwvEVjAA3suRpJS7NdvZ3Ft7Cz2qGQ1ogmoq2ZaNf5kS7GRlx+1BFo72N6xHBg+/QIZ0euafGxzXaIqBjxRKGRApwKGWEoyhsBzmq/VkWx1Kl9LzGrb70CiBTEO26rnmaaA8HFGZJgR1cQ7KyGrpXRqiwmCWCBRXNTFq9F3N82nlXA63btsiO2NF1JbOnujN0DCbudGFG1K42fMct5KGBVmTa18tMkOWXROg1hfwl/1Tx4YdVUDq1rv2Pgiv2xfx9kGIKyvzh2XWM5nzugXV3G/XiwUtdnWAxkOe8OKsOdgUq/rjgfwOVNwaWreH4Al75tmBJ9378KmwKwcVr1Md3WRbCRBYwqqmMdzLcE5o8vcPukqpq9KyL4MSyCX/lD/OHwwOjKFg94gJljC1p3OCjwJnU1/jBAtW16hKBu7KjmxiziXQ4K2Hb7ucVmhbA/KN4ZA5e7lWG14siO4p0661pTc/PIeatH0E+LfCbY4R5W3lZjiJLTCoZr8kKzlGCstQNEzYPLCTq9Qde/Jrcv068Pg9u7cBl+qkHUxrEiS3N4XBhaQ0YYgyzCKyciL2fYFBylJv+nqDIQUWVV+r8FsmwZhomFpOrIyeGsyd901qR+tM2AwhVPWO7qpEmHYm1Qudm/4ydrjOSTn0npAPmhSpB/D7Q2OzzxllJxLN4ocVjZFou6B52rdiaB964ToAd830t8H7gtfHfeG9/Zcf2PgO+1lXxyfL98iu15MNYj1539Di9/38weHncWxEsS+9dh/xqo79iM5IjN6sKAuhrApgbXsXKlbrZLDI5lqMmjlEnn7Q9AyW1w4LRqNbbbFOprx+Mv1QoUG6usykIcvp8BD9XP3yYZe/Tms3p7u+MQGvydoYGpyZzTroKDDi2UHU/Z++BgjUV98uCg400zS6VXOjwzUfVypdlrnlTWDPlzEzKi7eY3ct+1U2566vrdnV0/NtEt/uyMoW2WVosvarawU3jere6hSIf5yN74azwUiZKp+MrIHj4MWQ0OH+7VTdxc/jRBNXz5+w/m2f8= \ No newline at end of file diff --git a/docs/static/operator-crd-architecture.png b/docs/static/operator-crd-architecture.png deleted file mode 100644 index da86fa51b2..0000000000 Binary files a/docs/static/operator-crd-architecture.png and /dev/null differ diff --git a/docs/static/operator-crd-architecture.xml 
b/docs/static/operator-crd-architecture.xml deleted file mode 100644 index 0c57bd52f4..0000000000 --- a/docs/static/operator-crd-architecture.xml +++ /dev/null @@ -1 +0,0 @@ -7Vtbd9o4EP41fgzHN7D9GELSnrPZ3TTtXrovewwWRo2xvLJooL9+R7ZkW5ZNSAgph0AfikZXz/fNjGZwDOdquf5Aw2zxK4lQYthmtDaciWHbljky4T8u2ZQSz7JLQUxxJAbVgs/4B5IzhXSFI5QrAxkhCcOZKpyRNEUzpshCSsmjOmxOEnXXLIyRJvg8CxNd+heO2EJIrVFQd3xEOF6IrX3bKzum4ewhpmSViv0M25kXn7J7Gcq1xIPmizAijw2Rc204V5QQVn5brq9QwnUr1VbOu+nprc5NUcp2meCPhuWU72GyEg9/R3IWU/T50y3If88QDRmh8PXqfpKLU7ON1FTxrIivZhnOOJzmJFkxdElnAtRCWrXg+cYLtkxER84oeUBXJIH1nUlKUhgzjmkYYTh+S1xpyoRGFOaLYlPemJOUyd1s0ZaTQf/BNf/Hj5HgOAVZguagm/F3RBkGyC+FmJGMb5OFM5zGfC2zbn7hfZMLl6+Ok6Sx+rV1M7zhq+uaF2DwfdC6IRJIfEBkiRjdwBDRa1uCXsJqbFOw5LHmoOsJ2aJBP1fSKRS8j6u1a+zhi4C/hwqWvzMVAN8FZmB5KwojLzk5VjkjS/hyj3KyojMEXydojlPMMEl13rTU6E4uQZF9lKiwt/r50WSBq6MtgLwtWpOhqfNE2KlC0FcAdaRi6uqQBkEXpK+B6MjTENWAUBUOeoiKj870Sj8KGuV06SS55PkeoQldBzBm8WlAOgM0EH11pAAJBSpHBq0GVlaX+XmvgpVufYY9SpjQRxHspEpG/614gBjXymmIRjH/P4tnCZgkqEksAgco1yn7pXhK2xKxW00RuTTvuMgLnLjFW1a21vedCZeRc5+gnqDeqnYM/cfQztvirRICxD0gnBa9pkqNFo+Hlu/eOFtcTcbXWK5jfrUZxLPMHsDyLMQpov+iNMbFOGAaE+sn4RQl8NjlAzkTWjKjCjC3rf4ljiJ+zmcFJOm4OuJb8emIg2rs8juNpHYRT5tJMwq5uhU4wnooSkKGv6s3qS7TEDvcEVwQTmw+VGzQU6eT+TxHTLOr6pC7mZrvaGxCEdz/RBMlU/J4XQsaHo0zq+iG7xIulWwFGcbVLbDNLga0ucFJfQGCtsCMBy2URpf8+grNaUJmD18WOC3FjUnQakz5hhjbiHa4YgREhLIFiUkK1CMF+Ppdi4dcW7ED6cLbDrnjRtXvkBWj7HXJ4oKgEhA0ESPJSk+M5LBs5eTzyAa6DTeNARmnXt7PRVceTl7IPKfFvXLF3WY7jq2er9SDmLUXpX/7J74yqfP1Iv/28fGXe+sP/OnPCz+wtZBiDgapRn4wfKayGPw3/tHwpkJRMHo4NoaTLujb3qvycj0W0bxR9LigKr8TJ6myGdWFlebcG9bNgW/69n6+6VX9Tw9YjgaW9c6QujAHgeX5itFYRwSc77vnwPEGgQOtMfubLzEwzZFof+V9A8ezRHuybgyebBqNO0QxgMqX3TsMOW8chuo6zVOxZPTWscR8H7HEfSKWeJYkhUDiYnREHqoHO0vD7vRCy3bgILTAEDW0uEcEXHXj3Vqq6cpNI7ICxdwXVXDdV+9bzDmS0owbuAp0EIQMvTQj4FVKM34/njuXZjz9Hr1faYaF+cPOdZklzmdA0RRhUsSn/CHXB4VpxLVB6MM8ATg7qirn8olxgPKJtzuVT6Z84umZytlV9boqx9cr/gd0Ve4ruyqKsgSsamdv1Sj/3qtTu8q/HZ7s7KkO5KmcnZl8Kp6q5zbcZSKn5L46rtCaR9v5R7Ch9Ybuqwcw/RWF/XxaRsAvYZS/xKkVczf6kOjs097Epz1B7m0m/+4c3Ujj3blaebhqpTkYGo1apbG1TpkCtuUk17WkgM+zBubQk4J6ctFSZneXOVuVyifrnttDpFIMLSuQb/0TnCNfhJFFm6HZMpZtP8E5busyHrSiVE/ZVF/IatWOZJiUC5WqOmD9VX+V5wTrr9sc2akVZfX3fc6ACvSOvFjb81DBOa/YvSzy8/OKwNR/0tovr+Avf6+yl2QV1cxwya/N6TTPOqfARYqtzgnG8SUYwTtMMAJT/xXxnGAcMsHwGwkGJAq+fDli95chGonHsJl21ElGd8rxsuyi862K51nQgV/wc1rZRfBTsgu5bRUc3daDHji7CEz73V5GhRc7rewiME//bcIXoXlkqQU067/1K4fXf1DpXP8P \ No newline at end of file diff --git a/docs/static/operator-diagram-cluster.png b/docs/static/operator-diagram-cluster.png deleted file mode 100644 index 201a18a5ed..0000000000 Binary files a/docs/static/operator-diagram-cluster.png and /dev/null differ diff --git a/docs/static/operator-diagram-database.png b/docs/static/operator-diagram-database.png deleted file mode 100644 index 6cfb3959d0..0000000000 Binary files a/docs/static/operator-diagram-database.png and /dev/null differ diff --git a/docs/static/operator-diagram.png b/docs/static/operator-diagram.png deleted file mode 100644 index a37c738ffe..0000000000 Binary files a/docs/static/operator-diagram.png and /dev/null differ diff --git a/docs/themes/crunchy-hugo-theme b/docs/themes/crunchy-hugo-theme deleted file mode 160000 index cda8fd1e16..0000000000 --- a/docs/themes/crunchy-hugo-theme +++ /dev/null @@ -1 +0,0 @@ -Subproject commit cda8fd1e169ee0a62583b88685c4b55b340bbd1d diff --git a/installers/olm/config/community/kustomization.yaml b/examples/pgadmin/kustomization.yaml similarity index 62% rename from installers/olm/config/community/kustomization.yaml rename to 
examples/pgadmin/kustomization.yaml index a34c7b4844..600eb8b82d 100644 --- a/installers/olm/config/community/kustomization.yaml +++ b/examples/pgadmin/kustomization.yaml @@ -1,6 +1,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization +namespace: postgres-operator + resources: -- ../operator -- ../examples +- pgadmin.yaml diff --git a/examples/pgadmin/pgadmin.yaml b/examples/pgadmin/pgadmin.yaml new file mode 100644 index 0000000000..b87856aa86 --- /dev/null +++ b/examples/pgadmin/pgadmin.yaml @@ -0,0 +1,19 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: rhino +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: + - name: supply + # An empty selector selects all postgresclusters in the Namespace + postgresClusterSelector: {} + - name: demand + postgresClusterSelector: + matchLabels: + postgres-operator.crunchydata.com/cluster: hippo diff --git a/examples/postgrescluster/kustomization.yaml b/examples/postgrescluster/kustomization.yaml index 240a6e2067..7035765b87 100644 --- a/examples/postgrescluster/kustomization.yaml +++ b/examples/postgrescluster/kustomization.yaml @@ -1,3 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + namespace: postgres-operator resources: diff --git a/examples/postgrescluster/postgrescluster.yaml b/examples/postgrescluster/postgrescluster.yaml index f65b1f3e94..75756af94e 100644 --- a/examples/postgrescluster/postgrescluster.yaml +++ b/examples/postgrescluster/postgrescluster.yaml @@ -3,25 +3,23 @@ kind: PostgresCluster metadata: name: hippo spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 - postgresVersion: 13 + postgresVersion: 16 instances: - name: instance1 dataVolumeClaimSpec: accessModes: - - "ReadWriteMany" + - "ReadWriteOnce" resources: requests: storage: 1Gi backups: pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-2.33-2 repos: - name: repo1 volume: volumeClaimSpec: accessModes: - - "ReadWriteMany" + - "ReadWriteOnce" resources: requests: storage: 1Gi @@ -29,10 +27,9 @@ spec: volume: volumeClaimSpec: accessModes: - - "ReadWriteMany" + - "ReadWriteOnce" resources: requests: storage: 1Gi proxy: - pgBouncer: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:centos8-1.15-2 + pgBouncer: {} diff --git a/go.mod b/go.mod index 307e51508b..d268d66018 100644 --- a/go.mod +++ b/go.mod @@ -1,27 +1,96 @@ module github.com/crunchydata/postgres-operator -go 1.15 +go 1.22.0 require ( - github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect - github.com/evanphx/json-patch v4.9.0+incompatible - github.com/go-logr/logr v0.4.0 - github.com/google/go-cmp v0.5.4 - github.com/onsi/ginkgo v1.14.1 - github.com/onsi/gomega v1.11.0 + github.com/go-logr/logr v1.4.2 + github.com/golang-jwt/jwt/v5 v5.2.1 + github.com/google/go-cmp v0.6.0 + github.com/google/uuid v1.6.0 + github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 + github.com/onsi/ginkgo/v2 v2.17.2 + github.com/onsi/gomega v1.33.1 + github.com/pganalyze/pg_query_go/v5 v5.1.0 github.com/pkg/errors v0.9.1 - github.com/sirupsen/logrus v1.6.0 - github.com/wojas/genericr v0.2.0 - github.com/xdg/stringprep v1.0.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.14.0 - go.opentelemetry.io/otel v0.14.0 - go.opentelemetry.io/otel/exporters/stdout v0.14.0 - go.opentelemetry.io/otel/exporters/trace/jaeger v0.14.0 - 
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 - gotest.tools/v3 v3.0.3 - k8s.io/api v0.20.8 - k8s.io/apimachinery v0.20.8 - k8s.io/client-go v0.20.8 - sigs.k8s.io/controller-runtime v0.8.3 - sigs.k8s.io/yaml v1.2.0 + github.com/sirupsen/logrus v1.9.3 + github.com/xdg-go/stringprep v1.0.2 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 + go.opentelemetry.io/otel v1.27.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.2.0 + go.opentelemetry.io/otel/sdk v1.27.0 + go.opentelemetry.io/otel/trace v1.27.0 + golang.org/x/crypto v0.27.0 + gotest.tools/v3 v3.1.0 + k8s.io/api v0.30.2 + k8s.io/apimachinery v0.30.2 + k8s.io/client-go v0.30.2 + k8s.io/component-base v0.30.2 + sigs.k8s.io/controller-runtime v0.18.4 + sigs.k8s.io/yaml v1.4.0 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.54.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.22.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 // indirect + 
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.66.2 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.30.2 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a // indirect + k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index a92849ca85..aed2056f6f 100644 --- a/go.sum +++ b/go.sum @@ -1,825 +1,251 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod 
h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/sketches-go v0.0.1 h1:RtG+76WKgZuz6FIaGsjoPePmadDBkuD/KC6+ZWu78b8= -github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= -github.com/benbjohnson/clock 
v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= 
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c h1:ZfSZ3P3BedhKGUhzj7BQlPSU4OvT6tfOKe3DVHzOA7s= -github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= -github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 
+github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack 
v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod 
h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod 
h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 h1:mjQG0Vakr2h246kEDR85U8y8ZhPgT3bguTCajRa/jaw= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod 
h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.11.0 h1:+CqWgvj0OZycCaqclBD1pxKHAU+tOkHmQIWvDHq2aug= -github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= +github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/pganalyze/pg_query_go/v5 v5.1.0 h1:MlxQqHZnvA3cbRQYyIrjxEjzo560P6MyTgtlaf3pmXg= +github.com/pganalyze/pg_query_go/v5 v5.1.0/go.mod h1:FsglvxidZsVN+Ltw3Ai6nTgPVcK2BPukH3jCDEqc1Ug= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod 
h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= +github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/wojas/genericr v0.2.0 h1:pr3jrA2dnChfwK0IlKNnu/OnLQATNL+mK1Ft94RtDWc= -github.com/wojas/genericr v0.2.0/go.mod h1:I+Dk5IWkJB1eAc/qh3Ry/zIp5TvkrTp+OYbhhjclYr8= -github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= -github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= 
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib v0.14.0 h1:ntrQmEKqYQL6z2YNCk+3Cg4lpJwd9aHK/JMOFpda8yc= -go.opentelemetry.io/contrib v0.14.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.14.0 h1:f7M+R7vO1Q8hq29huD14olXE9Seor47BjPzs1p+VW38= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.14.0/go.mod h1:Rw8yZpEGuffGoRJ8yoxjvQd3qZZuWfDj163NEfux2sw= -go.opentelemetry.io/otel v0.14.0 h1:YFBEfjCk9MTjaytCNSUkp9Q8lF7QJezA06T71FbQxLQ= -go.opentelemetry.io/otel v0.14.0/go.mod h1:vH5xEuwy7Rts0GNtsCW3HYQoZDY+OmBJ6t1bFGGlxgw= -go.opentelemetry.io/otel/exporters/stdout v0.14.0 h1:gDMMj9fo1V70W5EImpnK3chkhk+xE193slrvofXYHDM= -go.opentelemetry.io/otel/exporters/stdout v0.14.0/go.mod h1:KG9w470+KbZZexYbC/g3TPKgluS0VgBJHh4KlnJpG18= -go.opentelemetry.io/otel/exporters/trace/jaeger v0.14.0 h1:bZBrD38x5mIcgJfX7pb9IstnOYCyOwjst1zwozk05Yo= -go.opentelemetry.io/otel/exporters/trace/jaeger v0.14.0/go.mod h1:UnksyFj1mLMRzy64G8Is+joarp8tUHIxJlQQJ4PHVgI= -go.opentelemetry.io/otel/sdk v0.14.0 h1:Pqgd85y5XhyvHQlOxkKW+FD4DAX7AoeaNIDKC2VhfHQ= -go.opentelemetry.io/otel/sdk v0.14.0/go.mod h1:kGO5pEMSNqSJppHAm8b73zztLxB5fgDQnD56/dl5xqE= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.2.0 h1:OiYdrCq1Ctwnovp6EofSPwlp5aGy4LgKNbkg7PtEUw8= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.2.0/go.mod h1:DUFCmFkXr0VtAHl5Zq2JRx24G6ze5CAq8YfdD36RdX8= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= -golang.org/x/crypto 
v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 h1:LoYXNGAShUG3m/ehNk4iFctuhGX/+R1ZpfJ4/ia80JM= +golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8/go.mod 
h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net 
v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= -gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= 
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.32.0 h1:Le77IccnTqEa8ryp9wIpX5W3zYm7Gf9LhOp9PHcwFts= -google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 h1:QW9+G6Fir4VcRXVH8x3LilNAb6cxBGLa6+GM4hRwexE= +google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3/go.mod h1:kdrSS/OiLkPrNUpzD4aHgCq2rVuC/YRxok32HXZ4vRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= 
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= -k8s.io/api v0.20.8 h1:UwbT15oAJ1jaWxkHYWtjxuVEu2CvRiaTz1udlU7ybYI= -k8s.io/api v0.20.8/go.mod h1:blZHVhFokrHWei9SvRTS3ocPWbi2YJgi6T+wC/mhe6k= -k8s.io/apiextensions-apiserver v0.20.1 h1:ZrXQeslal+6zKM/HjDXLzThlz/vPSxrfK3OqL8txgVQ= -k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.8 h1:EBP8Q2JVl+HgwydgAXxRM4sAzSeawH34Z4xusK2+CbY= -k8s.io/apimachinery v0.20.8/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE= -k8s.io/client-go v0.20.8 h1:ewvQIQDqUkQVajs06zzKErH/qpYcHaMvz+P7AF7nsWs= -k8s.io/client-go v0.20.8/go.mod h1:ufY6eLPP3u1Xjc3YjvzyXbYwtGVKMNyeH3m7oA/2s/w= -k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= -k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-base v0.20.2 
h1:LMmu5I0pLtwjpp5009KLuMGFqSc2S2isGw8t1hpYKLE= -k8s.io/component-base v0.20.2/go.mod h1:pzFtCiwe/ASD0iV7ySMu8SYVJjCapNM9bjvk7ptpKh0= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ= -k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.8.3 h1:GMHvzjTmaWHQB8HadW+dIvBoJuLvZObYJ5YoZruPRao= -sigs.k8s.io/controller-runtime v0.8.3/go.mod h1:U/l+DUopBc1ecfRZ5aviA9JDmGFQKvLf5YkZNx2e0sU= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.3 h1:4oyYo8NREp49LBBhKxEqCulFjg26rawYKrnCmg+Sr6c= -sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk= +gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ= +k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI= +k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI= +k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE= +k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw= +k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg= +k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50= +k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs= +k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII= +k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a 
h1:zD1uj3Jf+mD4zmA7W+goE5TxDkI7OGJjBNBzq5fJtLA= +k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 3d47189be2..7fc3d63c10 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,15 +1,3 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 diff --git a/hack/config_sync.sh b/hack/config_sync.sh deleted file mode 100755 index 4767747c2e..0000000000 --- a/hack/config_sync.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -# Copyright 2021 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -test="${PGOROOT:?Need to set PGOROOT env variable}" - -# sync a master config file with kubectl and helm installers -sync_config() { - - KUBECTL_SPEC_PREFIX=$1 - INSTALLER_ROOT=$2 - MASTER_CONFIG=$3 - - yq write --inplace --doc 2 "$INSTALLER_ROOT/kubectl/$KUBECTL_SPEC_PREFIX.yml" 'data"values.yaml"' -- "$(cat $MASTER_CONFIG)" - yq write --inplace --doc 2 "$INSTALLER_ROOT/kubectl/$KUBECTL_SPEC_PREFIX-ocp311.yml" 'data"values.yaml"' -- "$(cat $MASTER_CONFIG)" - - cat "$INSTALLER_ROOT/helm/helm_template.yaml" "$MASTER_CONFIG" > "$INSTALLER_ROOT/helm/values.yaml" -} - -# sync operator configuration -sync_config "postgres-operator" "$PGOROOT/installers" "$PGOROOT/installers/ansible/values.yaml" - -# sync metrics configuration -sync_config "postgres-operator-metrics" "$PGOROOT/installers/metrics" "$PGOROOT/installers/metrics/ansible/values.yaml" - -echo "Configuration sync complete" diff --git a/hack/controller-generator.sh b/hack/controller-generator.sh deleted file mode 100755 index aff160e5c6..0000000000 --- a/hack/controller-generator.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2021 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu - -# Create and cleanup a temporary directory. -DIR="$(mktemp -d)" -trap "rm -rf '$DIR'" EXIT - -# Find the Go install path. -[ "${GOBIN:-}" ] || GOBIN="$(go env GOBIN)" -[ "${GOBIN:-}" ] || GOBIN="$(go env GOPATH)/bin" - -# Find `controller-gen` on the current PATH or install it to the Go install path. -tool="$(command -v controller-gen || true)" -[ -n "$tool" ] || tool="$GOBIN/controller-gen" -[ -x "$tool" ] || ( cd "$DIR" && go mod init tmp && go get 'sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1' ) - -"$tool" "$@" diff --git a/hack/create-kubeconfig.sh b/hack/create-kubeconfig.sh index 276890c62c..3bebcd194e 100755 --- a/hack/create-kubeconfig.sh +++ b/hack/create-kubeconfig.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2021 Crunchy Data Solutions, Inc. +# Copyright 2021 - 2024 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -32,10 +32,13 @@ kubeconfig="${directory}/${namespace}/${account}" mkdir -p "${directory}/${namespace}" kubectl config view --minify --raw > "${kubeconfig}" -# grab the service account token -token=$(kubectl get secret -n "${namespace}" -o go-template=' +# Grab the service account token. If one has not already been generated, +# create a secret to do so. See the LegacyServiceAccountTokenNoAutoGeneration +# feature gate. 
+for i in 1 2 3 4; do + token=$(kubectl get secret -n "${namespace}" -o go-template=' {{- range .items }} - {{- if and (eq (or .type "") "kubernetes.io/service-account-token") .metadata.annotations }} + {{- if and (eq (or .type "") "kubernetes.io/service-account-token") .metadata.annotations .data }} {{- if (eq (or (index .metadata.annotations "kubernetes.io/service-account.name") "") "'"${account}"'") }} {{- if (ne (or (index .metadata.annotations "kubernetes.io/created-by") "") "openshift.io/create-dockercfg-secrets") }} {{- .data.token | base64decode }} @@ -43,6 +46,22 @@ token=$(kubectl get secret -n "${namespace}" -o go-template=' {{- end }} {{- end }} {{- end }}') + + [[ -n "${token}" ]] && break + + kubectl apply -n "${namespace}" --server-side --filename=- <<< " +apiVersion: v1 +kind: Secret +type: kubernetes.io/service-account-token +metadata: { + name: ${account}-token, + annotations: { kubernetes.io/service-account.name: ${account} } +}" + # If we are on our third or fourth loop, try sleeping to give kube time to create the token + if [ $i -gt 2 ]; then + sleep $(($i-2)) + fi +done kubectl config --kubeconfig="${kubeconfig}" set-credentials "${account}" --token="${token}" # remove any namespace setting, replace the username, and minify once more diff --git a/hack/generate-rbac.sh b/hack/generate-rbac.sh deleted file mode 100755 index 4148c0daff..0000000000 --- a/hack/generate-rbac.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2021 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu - -declare -r paths="$1" directory="$2" - -# Use `controller-gen` to parse Go markers. -( set -x -"${BASH_SOURCE[0]%/*}/controller-generator.sh" \ - rbac:roleName='generated' \ - paths="${paths}" \ - output:dir="${directory}" # ${directory}/role.yaml -) - -# NOTE(cbandy): `kustomize` v4.1 and `kubectl` v1.22 will be able to change the -# kind of a resource: https://pr.k8s.io/101120 -ruby -r 'set' -r 'yaml' -e ' -directory = ARGV[0] -roles = YAML.load_stream(IO.read(File.join(directory, "role.yaml"))) -operator = roles.shift - -abort "Expected the operator ClusterRole first!" unless operator and operator["kind"] == "ClusterRole" - -# The client used by the controller sets up a cache and an informer for any GVK -# that it GETs. That informer needs the "watch" permission. -# - https://github.com/kubernetes-sigs/controller-runtime/issues/1249 -# - https://github.com/kubernetes-sigs/controller-runtime/issues/1454 -# TODO(cbandy): Move this into an RBAC marker when it can be configured on the Manager. -operator["rules"].each do |rule| - verbs = rule["verbs"].to_set - rule["verbs"] = verbs.add("watch").sort if verbs.intersect? Set["get", "list"] -end - -# Combine the other parsed Roles into the ClusterRole. -rules = operator["rules"] + roles.flat_map { |role| role["rules"] } -rules = rules. - group_by { |rule| rule.slice("apiGroups", "resources") }. 
- map do |(group_resource, rules)| - verbs = rules.flat_map { |rule| rule["verbs"] }.to_set.sort - group_resource.merge("verbs" => verbs) - end -operator["rules"] = rules.sort_by { |rule| rule.to_a } - -# Combine resources that have the same verbs. -rules = operator["rules"]. - group_by { |rule| rule.slice("apiGroups", "verbs") }. - map do |(group_verb, rules)| - resources = rules.flat_map { |rule| rule["resources"] }.to_set.sort - rule = group_verb.merge("resources" => resources) - rule.slice("apiGroups", "resources", "verbs") # keep the keys in order - end -operator["rules"] = rules.sort_by { |rule| rule.to_a } - -operator["metadata"] = { "name" => "postgres-operator" } -IO.write(File.join(directory, "cluster", "role.yaml"), YAML.dump(operator)) - -operator["kind"] = "Role" -IO.write(File.join(directory, "namespace", "role.yaml"), YAML.dump(operator)) -' -- "${directory}" diff --git a/hack/update-pgmonitor-installer.sh b/hack/update-pgmonitor-installer.sh new file mode 100755 index 0000000000..148a4761c9 --- /dev/null +++ b/hack/update-pgmonitor-installer.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +# Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script updates the Kustomize installer for monitoring with the latest Grafana, +# Prometheus and Alert Manager configuration per the pgMonitor tag specified + +directory=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) + +# The pgMonitor tag to use to refresh the current monitoring installer +pgmonitor_tag=v4.8.1 + +# Set the directory for the monitoring Kustomize installer +pgo_examples_monitoring_dir="${directory}/../../postgres-operator-examples/kustomize/monitoring" + +# Create a tmp directory for checking out the pgMonitor tag +tmp_dir="${directory}/pgmonitor_tmp/" +mkdir -p "${tmp_dir}" + +# Clone the pgMonitor repo and checkout the tag provided +git -C "${tmp_dir}" clone https://github.com/CrunchyData/pgmonitor.git +cd "${tmp_dir}/pgmonitor" +git checkout "${pgmonitor_tag}" + +# Deviation from pgMonitor default! +# Update "${DS_PROMETHEUS}" to "PROMETHEUS" in all containers dashboards +find "grafana/containers" -type f -exec \ + sed -i 's/${DS_PROMETHEUS}/PROMETHEUS/' {} \; +# Copy Grafana dashboards for containers +cp -r "grafana/containers/." "${pgo_examples_monitoring_dir}/config/grafana/dashboards" + +# Deviation from pgMonitor default! +# Update the dashboard location to the default for the Grafana container. +sed -i 's#/etc/grafana/crunchy_dashboards#/etc/grafana/provisioning/dashboards#' \ + "grafana/linux/crunchy_grafana_dashboards.yml" +cp "grafana/linux/crunchy_grafana_dashboards.yml" "${pgo_examples_monitoring_dir}/config/grafana" + +# Deviation from pgMonitor default! +# Update the URL for the Grafana data source configuration to use env vars for the Prometheus host +# and port. 
+sed -i 's#localhost:9090#$PROM_HOST:$PROM_PORT#' \ + "grafana/common/crunchy_grafana_datasource.yml" +cp "grafana/common/crunchy_grafana_datasource.yml" "${pgo_examples_monitoring_dir}/config/grafana" + +# Deviation from pgMonitor default! +# Append the Alertmanager alerting configuration to the Prometheus config, targeting +# the crunchy-alertmanager service. +cp "prometheus/containers/crunchy-prometheus.yml.containers" "prometheus/containers/crunchy-prometheus.yml" +cat << EOF >> prometheus/containers/crunchy-prometheus.yml +alerting: + alertmanagers: + - scheme: http + static_configs: + - targets: + - "crunchy-alertmanager:9093" +EOF +cp "prometheus/containers/crunchy-prometheus.yml" "${pgo_examples_monitoring_dir}/config/prometheus" + +# Copy the default Alert Manager configuration +cp "alertmanager/common/crunchy-alertmanager.yml" "${pgo_examples_monitoring_dir}/config/alertmanager" +cp "prometheus/containers/alert-rules.d/crunchy-alert-rules-pg.yml.containers.example" \ + "${pgo_examples_monitoring_dir}/config/alertmanager/crunchy-alert-rules-pg.yml" + +# Cleanup any temporary resources +rm -rf "${tmp_dir}" diff --git a/img/CrunchyDataPrimaryIcon.png b/img/CrunchyDataPrimaryIcon.png new file mode 100644 index 0000000000..e238a688dd Binary files /dev/null and b/img/CrunchyDataPrimaryIcon.png differ diff --git a/installers/favicon.png b/installers/favicon.png deleted file mode 100644 index 66ce2072e9..0000000000 Binary files a/installers/favicon.png and /dev/null differ diff --git a/installers/gcp-marketplace/Dockerfile b/installers/gcp-marketplace/Dockerfile deleted file mode 100644 index adf85a355a..0000000000 --- a/installers/gcp-marketplace/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -ARG MARKETPLACE_VERSION -FROM gcr.io/cloud-marketplace-tools/k8s/deployer_envsubst:${MARKETPLACE_VERSION} AS build - -# Verify Bash (>= 4.3) has `wait -n` -RUN bash -c 'echo -n & wait -n' - - -FROM gcr.io/cloud-marketplace-tools/k8s/deployer_envsubst:${MARKETPLACE_VERSION} - -RUN install -D /bin/create_manifests.sh /opt/postgres-operator/cloud-marketplace-tools/bin/create_manifests.sh - -# https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#installing-ansible-on-debian -RUN if [ -f /etc/os-release ] && [ debian = "$(. /etc/os-release; echo $ID)" ] && [ 10 -ge "$(. 
/etc/os-release; echo $VERSION_ID)" ]; then \ - apt-get update && apt-get install -y --no-install-recommends gnupg && rm -rf /var/lib/apt/lists/* && \ - wget -qO- 'https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x93C4A3FD7BB9C367' | apt-key add && \ - echo > /etc/apt/sources.list.d/ansible.list deb http://ppa.launchpad.net/ansible/ansible-2.9/ubuntu trusty main ; \ - fi - -RUN apt-get update \ - && apt-get install -y --no-install-recommends ansible=2.9.* openssh-client \ - && rm -rf /var/lib/apt/lists/* - -COPY installers/ansible/* \ - /opt/postgres-operator/ansible/ -COPY installers/favicon.png \ - installers/gcp-marketplace/install-job.yaml \ - installers/gcp-marketplace/install.sh \ - installers/gcp-marketplace/values.yaml \ - /opt/postgres-operator/ - -COPY installers/gcp-marketplace/install-hook.sh \ - /bin/create_manifests.sh -COPY installers/gcp-marketplace/schema.yaml \ - /data/ -COPY installers/gcp-marketplace/application.yaml \ - /data/manifest/ -COPY installers/gcp-marketplace/test-pod.yaml \ - /data-test/manifest/ - -ARG PGO_VERSION -RUN for file in \ - /data/schema.yaml \ - /data/manifest/application.yaml \ - ; do envsubst '$PGO_VERSION' < "$file" > /tmp/sponge && mv /tmp/sponge "$file" ; done diff --git a/installers/gcp-marketplace/Makefile b/installers/gcp-marketplace/Makefile deleted file mode 100644 index 5f4f0c6eb1..0000000000 --- a/installers/gcp-marketplace/Makefile +++ /dev/null @@ -1,55 +0,0 @@ -.DEFAULT_GOAL := help - -DEPLOYER_IMAGE ?= registry.localhost:5000/postgres-operator-gcp-marketplace-deployer:$(PGO_VERSION) -IMAGE_BUILDER ?= buildah -MARKETPLACE_TOOLS ?= gcr.io/cloud-marketplace-tools/k8s/dev:$(MARKETPLACE_VERSION) -MARKETPLACE_VERSION ?= 0.9.4 -KUBECONFIG ?= $(HOME)/.kube/config -PARAMETERS ?= {} -PGO_VERSION ?= 4.5.0 - -IMAGE_BUILD_ARGS = --build-arg MARKETPLACE_VERSION='$(MARKETPLACE_VERSION)' \ - --build-arg PGO_VERSION='$(PGO_VERSION)' - -MARKETPLACE_TOOLS_DEV = docker run --net=host --rm \ - --mount 'type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock,readonly' \ - --mount 'type=bind,source=$(KUBECONFIG),target=/mount/config/.kube/config,readonly' \ - '$(MARKETPLACE_TOOLS)' - -# One does _not_ need to be logged in with gcloud. -.PHONY: doctor -doctor: ## Check development prerequisites - $(MARKETPLACE_TOOLS_DEV) doctor - -.PHONY: doctor-fix -doctor-fix: - @# https://github.com/kubernetes-sigs/application/tree/master/config/crds - kubectl 2>/dev/null get crd/applications.app.k8s.io -o jsonpath='{""}' || \ - kubectl create -f https://raw.githubusercontent.com/GoogleCloudPlatform/marketplace-k8s-app-tools/master/crd/app-crd.yaml - -.PHONY: help -help: ALIGN=14 -help: ## Print this message - @awk -F ': ## ' -- "/^[^':]+: ## /"' { printf "'$$(tput bold)'%-$(ALIGN)s'$$(tput sgr0)' %s\n", $$1, $$2 }' $(MAKEFILE_LIST) - -.PHONY: image -image: image-$(IMAGE_BUILDER) - -.PHONY: image-buildah -image-buildah: ## Build the deployer image with Buildah - sudo buildah bud --file Dockerfile --tag '$(DEPLOYER_IMAGE)' $(IMAGE_BUILD_ARGS) --layers ../.. - sudo buildah push '$(DEPLOYER_IMAGE)' docker-daemon:'$(DEPLOYER_IMAGE)' - -.PHONY: image-docker -image-docker: ## Build the deployer image with Docker - docker build --file Dockerfile --tag '$(DEPLOYER_IMAGE)' $(IMAGE_BUILD_ARGS) ../.. 
- -# PARAMETERS='{"OPERATOR_NAMESPACE": "", "OPERATOR_NAME": "", "OPERATOR_ADMIN_PASSWORD": ""}' -.PHONY: install -install: ## Execute the deployer image in an existing Kubernetes namespace - $(MARKETPLACE_TOOLS_DEV) install --deployer='$(DEPLOYER_IMAGE)' --parameters='$(PARAMETERS)' - -# PARAMETERS='{"OPERATOR_ADMIN_PASSWORD": ""}' -.PHONY: verify -verify: ## Execute and test the deployer image in a new (random) Kubernetes namespace then clean up - $(MARKETPLACE_TOOLS_DEV) verify --deployer='$(DEPLOYER_IMAGE)' --parameters='$(PARAMETERS)' diff --git a/installers/gcp-marketplace/README.md b/installers/gcp-marketplace/README.md deleted file mode 100644 index ef9585e322..0000000000 --- a/installers/gcp-marketplace/README.md +++ /dev/null @@ -1,144 +0,0 @@ -This directory contains the files that are used to install [Crunchy PostgreSQL for GKE][gcp-details], -which uses PGO: the PostgreSQL Operator from [Crunchy Data][crunchy-data], from the Google Cloud Marketplace. - -The integration centers around a container [image](./Dockerfile) that contains an installation -[schema](./schema.yaml) and an [Application][k8s-app] [manifest](./application.yaml). -Consult the [technical requirements][gcp-k8s-requirements] when making changes. - -[crunchy-data]: https://www.crunchydata.com -[k8s-app]: https://github.com/kubernetes-sigs/application/ -[gcp-k8s]: https://cloud.google.com/marketplace/docs/kubernetes-apps/ -[gcp-k8s-requirements]: https://cloud.google.com/marketplace/docs/partners/kubernetes-solutions/create-app-package -[gcp-k8s-tool-images]: https://console.cloud.google.com/gcr/images/cloud-marketplace-tools -[gcp-k8s-tool-repository]: https://github.com/GoogleCloudPlatform/marketplace-k8s-app-tools -[gcp-details]: https://console.cloud.google.com/marketplace/details/crunchydata/crunchy-postgresql-operator - - -# Installation - -## Quick install with Google Cloud Marketplace - -Install [Crunchy PostgreSQL for GKE][gcp-details] to a Google Kubernetes Engine cluster using -Google Cloud Marketplace. - -## Command line instructions - -### Prepare - -1. You'll need the following tools in your development environment. If you are using Cloud Shell, - everything is already installed. - - - envsubst - - [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) - - [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) - -2. Clone this repository. - - ```shell - git clone https://github.com/CrunchyData/postgres-operator.git - ``` - -3. Install the [Application][k8s-app] Custom Resource Definition. - - ```shell - kubectl apply -f 'https://raw.githubusercontent.com/GoogleCloudPlatform/marketplace-k8s-app-tools/master/crd/app-crd.yaml' - ``` - -4. At least one Storage Class is required. Google Kubernetes Engine is preconfigured with a default. - - ```shell - kubectl get storageclasses - ``` - -### Install the PostgreSQL Operator - -1. Configure the installation by setting environment variables. - - 1. Choose a version to install. - - ```shell - IMAGE_REPOSITORY=gcr.io/crunchydata-public/postgres-operator - - export PGO_VERSION=4.5.0 - export INSTALLER_IMAGE=${IMAGE_REPOSITORY}/deployer:${PGO_VERSION} - export OPERATOR_IMAGE=${IMAGE_REPOSITORY}:${PGO_VERSION} - export OPERATOR_IMAGE_API=${IMAGE_REPOSITORY}/pgo-apiserver:${PGO_VERSION} - ``` - - 2. Choose a namespace and name for the application. - - ```shell - export OPERATOR_NAMESPACE=pgo OPERATOR_NAME=pgo - ``` - - 2. Choose a password for the application admin. - - ```shell - export OPERATOR_ADMIN_PASSWORD=changethis - ``` - - 4. 
Choose default values for new PostgreSQL clusters. - - ```shell - export POSTGRES_METRICS=false - export POSTGRES_SERVICE_TYPE=ClusterIP - export POSTGRES_CPU=1000 # mCPU - export POSTGRES_MEM=2 # GiB - export POSTGRES_STORAGE_CAPACITY=1 # GiB - export POSTGRES_STORAGE_CLASS=ssd - export PGBACKREST_STORAGE_CAPACITY=2 # GiB - export PGBACKREST_STORAGE_CLASS=ssd - export BACKUP_STORAGE_CAPACITY=1 # GiB - export BACKUP_STORAGE_CLASS=ssd - ``` - -2. Prepare the Kubernetes namespace. - - ```shell - export INSTALLER_SERVICE_ACCOUNT=postgres-operator-installer - - kubectl create namespace "$OPERATOR_NAMESPACE" - kubectl create serviceaccount -n "$OPERATOR_NAMESPACE" "$INSTALLER_SERVICE_ACCOUNT" - kubectl create clusterrolebinding \ - "$OPERATOR_NAMESPACE:$INSTALLER_SERVICE_ACCOUNT:cluster-admin" \ - --serviceaccount="$OPERATOR_NAMESPACE:$INSTALLER_SERVICE_ACCOUNT" \ - --clusterrole=cluster-admin - ``` - -3. Generate and apply Kubernetes manifests. - - ```shell - envsubst < application.yaml > "${OPERATOR_NAME}_application.yaml" - envsubst < install-job.yaml > "${OPERATOR_NAME}_install-job.yaml" - envsubst < inventory.ini > "${OPERATOR_NAME}_inventory.ini" - - kubectl create -n "$OPERATOR_NAMESPACE" secret generic install-postgres-operator \ - --from-file=inventory="${OPERATOR_NAME}_inventory.ini" - - kubectl create -n "$OPERATOR_NAMESPACE" -f "${OPERATOR_NAME}_application.yaml" - kubectl create -n "$OPERATOR_NAMESPACE" -f "${OPERATOR_NAME}_install-job.yaml" - ``` - -The application can be seen in Google Cloud Platform Console at [Kubernetes Applications][]. - -[Kubernetes Applications]: https://console.cloud.google.com/kubernetes/application - - -# Uninstallation - -## Using Google Cloud Platform Console - -1. In the Console, open [Kubernetes Applications][]. -2. From the list of applications, select _Crunchy PostgreSQL Operator_ then click _Delete_. - -## Command line instructions - -Delete the Kubernetes resources created during install. 
- -```shell -export OPERATOR_NAMESPACE=pgo OPERATOR_NAME=pgo - -kubectl delete -n "$OPERATOR_NAMESPACE" job install-postgres-operator -kubectl delete -n "$OPERATOR_NAMESPACE" secret install-postgres-operator -kubectl delete -n "$OPERATOR_NAMESPACE" application "$OPERATOR_NAME" -``` diff --git a/installers/gcp-marketplace/application.yaml b/installers/gcp-marketplace/application.yaml deleted file mode 100644 index 9af6b1642c..0000000000 --- a/installers/gcp-marketplace/application.yaml +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: app.k8s.io/v1beta1 -kind: Application -metadata: - name: '${OPERATOR_NAME}' - labels: - app.kubernetes.io/name: '${OPERATOR_NAME}' -spec: - selector: - matchLabels: - app.kubernetes.io/name: '${OPERATOR_NAME}' - componentKinds: - - { group: core, kind: ConfigMap } - - { group: core, kind: Secret } - - { group: core, kind: Service } - - { group: apps, kind: Deployment } - - { group: batch, kind: Job } - descriptor: - description: Enterprise PostgreSQL-as-a-Service for Kubernetes - type: Crunchy PostgreSQL Operator - version: '${PGO_VERSION}' - maintainers: - - name: Crunchy Data - url: https://www.crunchydata.com/ - email: info@crunchydata.com - keywords: - - postgres - - postgresql - - database - - sql - - operator - - crunchy data - links: - - description: Crunchy PostgreSQL for Kubernetes - url: https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/ - - description: Documentation - url: 'https://access.crunchydata.com/documentation/postgres-operator/${PGO_VERSION}' - - description: GitHub - url: https://github.com/CrunchyData/postgres-operator - - info: - - name: Operator API - value: kubectl port-forward --namespace '${OPERATOR_NAMESPACE}' service/postgres-operator 8443 - - name: Operator Client - value: 'https://github.com/CrunchyData/postgres-operator/releases/tag/v${PGO_VERSION}' - - name: Operator User - type: Reference - valueFrom: { type: SecretKeyRef, secretKeyRef: { name: pgouser-admin, key: username } } - - name: Operator Password - type: Reference - valueFrom: { type: SecretKeyRef, secretKeyRef: { name: pgouser-admin, key: password } } diff --git a/installers/gcp-marketplace/install-hook.sh b/installers/gcp-marketplace/install-hook.sh deleted file mode 100755 index 96688f75ac..0000000000 --- a/installers/gcp-marketplace/install-hook.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -kc() { kubectl --namespace="$NAMESPACE" "$@"; } - -application_ownership="$( kc get "applications.app.k8s.io/$NAME" --output=json )" -application_ownership="$( jq <<< "$application_ownership" '{ metadata: { - labels: { "app.kubernetes.io/name": .metadata.name }, - ownerReferences: [{ - apiVersion, kind, name: .metadata.name, uid: .metadata.uid - }] -} }' )" - -existing="$( kc get deployment/postgres-operator --output=json 2> /dev/null || true )" - -if [ -n "$existing" ]; then - >&2 echo ERROR: Crunchy PostgreSQL Operator is already installed in this namespace - exit 1 -fi - -install_values="$( /bin/config_env.py envsubst < /opt/postgres-operator/values.yaml )" -installer="$( /bin/config_env.py envsubst < /opt/postgres-operator/install-job.yaml )" - -kc create --filename=/dev/stdin <<< "$installer" -kc patch job/install-postgres-operator --type=strategic --patch="$application_ownership" - -job_ownership="$( kc get job/install-postgres-operator --output=json )" -job_ownership="$( jq <<< "$job_ownership" '{ metadata: { - labels: { "app.kubernetes.io/name": .metadata.labels["app.kubernetes.io/name"] }, - ownerReferences: 
[{ - apiVersion, kind, name: .metadata.name, uid: .metadata.uid - }] -} }' )" - -kc create secret generic install-postgres-operator --from-file=values.yaml=/dev/stdin <<< "$install_values" -kc patch secret/install-postgres-operator --type=strategic --patch="$job_ownership" - -# Wait for either status condition then terminate the other. -kc wait --for=condition=complete --timeout=5m job/install-postgres-operator & -kc wait --for=condition=failed --timeout=5m job/install-postgres-operator & -wait -n -kill -s INT %% 2> /dev/null || true - -kc logs --selector=job-name=install-postgres-operator --tail=-1 -test 'Complete' = "$( kc get job/install-postgres-operator --output=jsonpath='{.status.conditions[*].type}' )" - -exec /opt/postgres-operator/cloud-marketplace-tools/bin/create_manifests.sh "$@" diff --git a/installers/gcp-marketplace/install-job.yaml b/installers/gcp-marketplace/install-job.yaml deleted file mode 100644 index 574aae7b12..0000000000 --- a/installers/gcp-marketplace/install-job.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: install-postgres-operator - labels: - app.kubernetes.io/name: '${OPERATOR_NAME}' -spec: - template: - spec: - serviceAccountName: '${INSTALLER_SERVICE_ACCOUNT}' - restartPolicy: Never - containers: - - name: installer - image: '${INSTALLER_IMAGE}' - imagePullPolicy: Always - command: ['/opt/postgres-operator/install.sh'] - env: - - { name: NAMESPACE, value: '${OPERATOR_NAMESPACE}' } - - { name: NAME, value: '${OPERATOR_NAME}' } - volumeMounts: - - { mountPath: /etc/ansible, name: configuration } - volumes: - - { name: configuration, secret: { secretName: install-postgres-operator } } diff --git a/installers/gcp-marketplace/install.sh b/installers/gcp-marketplace/install.sh deleted file mode 100755 index 6dc770b993..0000000000 --- a/installers/gcp-marketplace/install.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -kc() { kubectl --namespace="$NAMESPACE" "$@"; } - -application_ownership="$( kc get "applications.app.k8s.io/$NAME" --output=json )" -application_ownership="$( jq <<< "$application_ownership" '{ metadata: { - labels: { "app.kubernetes.io/name": .metadata.name }, - ownerReferences: [{ - apiVersion, kind, name: .metadata.name, uid: .metadata.uid - }] -} }' )" - -existing="$( kc get clusterrole/pgo-cluster-role --output=json 2> /dev/null || true )" - -if [ -n "$existing" ]; then - >&2 echo ERROR: Crunchy PostgreSQL Operator is already installed in another namespace - exit 1 -fi - -application_icon="$( base64 --wrap=0 /opt/postgres-operator/favicon.png )" -application_metadata="$( jq <<< '{}' --arg icon "$application_icon" '{ metadata: { - annotations: { "kubernetes-engine.cloud.google.com/icon": "data:image/png;base64,\($icon)" } -} }' )" - -kc patch "applications.app.k8s.io/$NAME" --type=merge --patch="$application_metadata" - -/usr/bin/ansible-playbook \ - --extra-vars 'kubernetes_in_cluster=true' \ - --extra-vars 'config_path=/etc/ansible/values.yaml' \ - --inventory /opt/postgres-operator/ansible/inventory.yaml \ - --tags=install /opt/postgres-operator/ansible/main.yml - -resources=( - clusterrole/pgo-cluster-role - clusterrolebinding/pgo-cluster-role - configmap/pgo-config - deployment/postgres-operator - role/pgo-role - rolebinding/pgo-role - secret/pgo.tls - secret/pgo-backrest-repo-config - secret/pgorole-pgoadmin - secret/pgouser-admin - service/postgres-operator - serviceaccount/postgres-operator -) - -for resource in "${resources[@]}"; do - kc patch 
"$resource" --type=strategic --patch="$application_ownership" -done diff --git a/installers/gcp-marketplace/schema.yaml b/installers/gcp-marketplace/schema.yaml deleted file mode 100644 index b0de6eb78d..0000000000 --- a/installers/gcp-marketplace/schema.yaml +++ /dev/null @@ -1,116 +0,0 @@ -applicationApiVersion: v1beta1 -properties: - BACKUP_STORAGE_CAPACITY: - title: Backup Storage Capacity [GiB] - description: Default gigabytes allocated to new backup PVCs - type: integer - default: 1 - minimum: 1 - - INSTALLER_IMAGE: { type: string, x-google-marketplace: { type: DEPLOYER_IMAGE } } - - INSTALLER_SERVICE_ACCOUNT: # This key appears in the ClusterRoleBinding name. - title: Cluster Admin Service Account - description: >- - Name of a service account in the target namespace that has cluster-admin permissions. - This is used by the operator installer to create Custom Resource Definitions. - type: string - x-google-marketplace: - type: SERVICE_ACCOUNT - serviceAccount: - roles: - - type: ClusterRole - rulesType: PREDEFINED - rulesFromRoleName: cluster-admin - - OPERATOR_ADMIN_PASSWORD: - title: Operator admin password - type: string - pattern: .+ - x-google-marketplace: - type: MASKED_FIELD - - OPERATOR_IMAGE: - type: string - default: gcr.io/crunchydata-public/postgres-operator:${PGO_VERSION} - x-google-marketplace: { type: IMAGE } - - OPERATOR_IMAGE_API: - type: string - default: gcr.io/crunchydata-public/postgres-operator/pgo-apiserver:${PGO_VERSION} - x-google-marketplace: { type: IMAGE } - - OPERATOR_NAME: { type: string, x-google-marketplace: { type: NAME } } - OPERATOR_NAMESPACE: { type: string, x-google-marketplace: { type: NAMESPACE } } - - PGBACKREST_STORAGE_CAPACITY: - title: pgBackRest Storage Capacity [GiB] - description: Default gigabytes allocated to new pgBackRest repositories - type: integer - default: 2 - minimum: 2 - - POSTGRES_CPU: - title: PostgreSQL CPU [mCPU] - description: Default mCPU allocated to new PostgreSQL clusters (1000 equals one Core) - type: integer - default: 1000 - minimum: 100 - - POSTGRES_MEM: - title: PostgreSQL Memory [GiB] - description: Default gigabytes allocated to new PostgreSQL clusters - type: integer - default: 2 - minimum: 1 - - POSTGRES_METRICS: - title: Always collect PostgreSQL metrics - description: When disabled, collection can be enabled per PostgreSQL cluster - type: boolean - default: false - - POSTGRES_SERVICE_TYPE: - title: PostgreSQL service type - description: Default type of the Service that exposes new PostgreSQL clusters - type: string - enum: [ ClusterIP, LoadBalancer, NodePort ] - default: ClusterIP - - POSTGRES_STORAGE_CAPACITY: - title: PostgreSQL Storage Capacity [GiB] - description: Default gigabytes allocated to new PostgreSQL clusters - type: integer - default: 1 - minimum: 1 - -required: - - INSTALLER_IMAGE - - INSTALLER_SERVICE_ACCOUNT - - - OPERATOR_ADMIN_PASSWORD - - OPERATOR_IMAGE - - OPERATOR_IMAGE_API - - OPERATOR_NAME - - OPERATOR_NAMESPACE - - - POSTGRES_SERVICE_TYPE - - POSTGRES_CPU - - POSTGRES_MEM - - POSTGRES_STORAGE_CAPACITY - - POSTGRES_METRICS - - - PGBACKREST_STORAGE_CAPACITY - - - BACKUP_STORAGE_CAPACITY - -x-google-marketplace: - clusterConstraints: - istio: { type: UNSUPPORTED } - -form: - - widget: help - description: |- - Only one instance of Crunchy PostgreSQL Operator is necessary per Kubernetes cluster. - - If you have further questions, contact us at info@crunchydata.com. 
diff --git a/installers/gcp-marketplace/test-pod.yaml b/installers/gcp-marketplace/test-pod.yaml deleted file mode 100644 index e05e926ddb..0000000000 --- a/installers/gcp-marketplace/test-pod.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: test-postgres-operator - labels: - app.kubernetes.io/name: '${OPERATOR_NAME}' - annotations: - marketplace.cloud.google.com/verification: test -spec: - dnsPolicy: ClusterFirst - restartPolicy: Never - containers: - - name: tester - image: '${INSTALLER_IMAGE}' - imagePullPolicy: Always - command: ['sh', '-ce'] - args: - - >- - wget --quiet --output-document=- - --no-check-certificate - --http-user="${PGOUSERNAME}" - --http-password="${PGOUSERPASS}" - --private-key="${PGO_CLIENT_KEY}" - --certificate="${PGO_CLIENT_CERT}" - --ca-certificate="${PGO_CA_CERT}" - "${PGO_APISERVER_URL}/version" - env: - - { name: PGO_APISERVER_URL, value: 'https://postgres-operator:8443' } - - { name: PGOUSERNAME, valueFrom: { secretKeyRef: { name: pgouser-admin, key: username } } } - - { name: PGOUSERPASS, valueFrom: { secretKeyRef: { name: pgouser-admin, key: password } } } - - { name: PGO_CA_CERT, value: '/etc/pgo/certificates/tls.crt' } - - { name: PGO_CLIENT_CERT, value: '/etc/pgo/certificates/tls.crt' } - - { name: PGO_CLIENT_KEY, value: '/etc/pgo/certificates/tls.key' } - volumeMounts: - - { mountPath: /etc/pgo/certificates, name: certificates } - volumes: - - { name: certificates, secret: { secretName: pgo.tls } } diff --git a/installers/gcp-marketplace/values.yaml b/installers/gcp-marketplace/values.yaml deleted file mode 100644 index 64382a3eaa..0000000000 --- a/installers/gcp-marketplace/values.yaml +++ /dev/null @@ -1,69 +0,0 @@ ---- -pgo_image: '${OPERATOR_IMAGE}' -pgo_apiserver_image: '${OPERATOR_IMAGE_API}' - -archive_mode: "true" -archive_timeout: "60" -badger: "false" -ccp_image_prefix: "registry.developers.crunchydata.com/crunchydata" -ccp_image_pull_secret: "" -ccp_image_pull_secret_manifest: "" -ccp_image_tag: "centos7-12.4-4.5.0" -create_rbac: "true" -db_name: "" -db_password_age_days: "0" -db_password_length: "24" -db_port: "5432" -db_replicas: "0" -db_user: "testuser" -default_instance_memory: "128Mi" -default_pgbackrest_memory: "48Mi" -default_pgbouncer_memory: "24Mi" -default_exporter_memory: "24Mi" -disable_auto_failover: "false" -exporterport: "9187" -metrics: '${POSTGRES_METRICS}' -pgbadgerport: "10000" -pgo_admin_password: '${OPERATOR_ADMIN_PASSWORD}' -pgo_admin_perms: "*" -pgo_admin_role_name: "pgoadmin" -pgo_admin_username: "admin" -pgo_client_container_install: "false" -pgo_client_install: 'false' -pgo_client_version: "4.5.0" -pgo_image_prefix: "registry.developers.crunchydata.com/crunchydata" -pgo_image_tag: "centos7-4.5.0" -pgo_installation_name: '${OPERATOR_NAME}' -pgo_operator_namespace: '${OPERATOR_NAMESPACE}' -service_type: '${POSTGRES_SERVICE_TYPE}' -sync_replication: "false" - -backrest_storage: 'pgbackrest-default' -backup_storage: 'backup-default' -primary_storage: 'primary-default' -replica_storage: 'replica-default' -wal_storage: '' - -storage1_name: 'backup-default' -storage1_access_mode: 'ReadWriteOnce' -storage1_size: '${BACKUP_STORAGE_CAPACITY}Gi' -storage1_type: 'dynamic' -storage1_class: '' - -storage2_name: 'pgbackrest-default' -storage2_access_mode: 'ReadWriteOnce' -storage2_size: '${PGBACKREST_STORAGE_CAPACITY}Gi' -storage2_type: 'dynamic' -storage2_class: '' - -storage3_name: 'primary-default' -storage3_access_mode: 'ReadWriteOnce' -storage3_size: '${POSTGRES_STORAGE_CAPACITY}Gi' 
-storage3_type: 'dynamic' -storage3_class: '' - -storage4_name: 'replica-default' -storage4_access_mode: 'ReadWriteOnce' -storage4_size: '${POSTGRES_STORAGE_CAPACITY}Gi' -storage4_type: 'dynamic' -storage4_class: '' diff --git a/installers/olm/.gitignore b/installers/olm/.gitignore deleted file mode 100644 index 94b77ebff7..0000000000 --- a/installers/olm/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -/bundles/ -/projects/ -/tools/ diff --git a/installers/olm/Makefile b/installers/olm/Makefile deleted file mode 100644 index 568b85ca10..0000000000 --- a/installers/olm/Makefile +++ /dev/null @@ -1,102 +0,0 @@ -.DEFAULT_GOAL := help -.SUFFIXES: - -CONTAINER ?= docker -PGO_VERSION ?= 5.0.2 - -OS_KERNEL ?= $(shell bash -c 'echo $${1,,}' - `uname -s`) -OS_MACHINE ?= $(shell bash -c 'echo $${1/x86_/amd}' - `uname -m`) -SYSTEM = $(OS_KERNEL)-$(OS_MACHINE) - -export PATH := $(CURDIR)/tools/$(SYSTEM):$(PATH) - -export PGO_VERSION - -distros = community redhat - -.PHONY: bundles -bundles: ## Build OLM bundles -bundles: $(distros:%=bundles/%) - -# https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/#validating-your-bundle -# https://github.com/operator-framework/community-operators/blob/8a36a33/docs/packaging-required-criteria-ocp.md -.PHONY: bundles/community -bundles/community: - ./generate.sh community - env operator-sdk bundle validate $@ --select-optional='suite=operatorframework' - env operator-sdk bundle validate $@ --select-optional='name=community' --optional-values='index-path=$@/Dockerfile' - -# https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/reviewing-your-metadata-bundle -.PHONY: bundles/redhat -bundles/redhat: - ./generate.sh redhat - env operator-sdk bundle validate $@ --select-optional='suite=operatorframework' - -.PHONY: clean -clean: clean-deprecated -clean: ## Remove generated files and downloaded tools - rm -rf ./bundles ./projects ./tools - -.PHONY: clean-deprecated -clean-deprecated: - rm -rf ./package - -.PHONY: help -help: ALIGN=18 -help: ## Print this message - @awk -F ': ## ' -- "/^[^':]+: ## /"' { printf "'$$(tput bold)'%-$(ALIGN)s'$$(tput sgr0)' %s\n", $$1, $$2 }' $(MAKEFILE_LIST) - -.PHONY: install-olm -install-olm: ## Install OLM in Kubernetes - env operator-sdk olm install - -.PHONY: tools -tools: ## Download tools needed to build bundles - -tools: tools/$(SYSTEM)/jq -tools/$(SYSTEM)/jq: - install -d '$(dir $@)' - curl -fSL -o '$@' "https://github.com/stedolan/jq/releases/download/jq-1.6/jq-$$(SYSTEM='$(SYSTEM)'; \ - case "$$SYSTEM" in \ - (linux-*) echo "$${SYSTEM/-amd/}";; (darwin-*) echo "$${SYSTEM/darwin/osx}";; (*) echo '$(SYSTEM)';; \ - esac)" - chmod u+x '$@' - -tools: tools/$(SYSTEM)/kubectl -tools/$(SYSTEM)/kubectl: - install -d '$(dir $@)' - curl -fSL -o '$@' 'https://dl.k8s.io/release/$(shell curl -Ls https://dl.k8s.io/release/stable-1.21.txt)/bin/$(OS_KERNEL)/$(OS_MACHINE)/kubectl' - chmod u+x '$@' - -# quay.io/operator-framework/operator-sdk -tools: tools/$(SYSTEM)/operator-sdk -tools/$(SYSTEM)/operator-sdk: - install -d '$(dir $@)' - curl -fSL -o '$@' 'https://github.com/operator-framework/operator-sdk/releases/download/v1.9.0/operator-sdk_$(OS_KERNEL)_$(OS_MACHINE)' - chmod u+x '$@' - -tools: tools/$(SYSTEM)/opm -tools/$(SYSTEM)/opm: - install -d '$(dir $@)' - curl -fSL -o '$@' 'https://github.com/operator-framework/operator-registry/releases/download/v1.17.5/$(OS_KERNEL)-$(OS_MACHINE)-opm' - chmod u+x '$@' - -tools/$(SYSTEM)/venv: - install -d '$(dir $@)' - python3 -m venv '$@' - -tools: 
tools/$(SYSTEM)/yq -tools/$(SYSTEM)/yq: | tools/$(SYSTEM)/venv - 'tools/$(SYSTEM)/venv/bin/python' -m pip install yq - cd '$(dir $@)' && ln -s venv/bin/yq - -.PHONY: validate-bundles -validate-bundles: ## Build temporary bundle images and run scorecard tests in Kubernetes -validate-bundles: $(distros:%=validate-%-image) -validate-bundles: $(distros:%=validate-%-directory) - -validate-%-directory: - ./validate-directory.sh 'bundles/$*' - -validate-%-image: - ./validate-image.sh '$(CONTAINER)' 'bundles/$*' diff --git a/installers/olm/README.md b/installers/olm/README.md deleted file mode 100644 index 0fea1912ce..0000000000 --- a/installers/olm/README.md +++ /dev/null @@ -1,54 +0,0 @@ -This directory contains the files that are used to install [Crunchy PostgreSQL for Kubernetes][hub-listing], -which includes PGO, the Postgres Operator from [Crunchy Data][], using [Operator Lifecycle Manager][OLM]. - -The integration centers around a [ClusterServiceVersion][olm-csv] [manifest](./bundle.csv.yaml) -that gets packaged for OperatorHub. Changes there are accepted only if they pass all the [scorecard][] -tests. Consult the [technical requirements][hub-contrib] when making changes. - - - -[Crunchy Data]: https://www.crunchydata.com -[hub-contrib]: https://operator-framework.github.io/community-operators/packaging-operator/ -[hub-listing]: https://operatorhub.io/operator/postgresql -[OLM]: https://github.com/operator-framework/operator-lifecycle-manager -[olm-csv]: https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md -[scorecard]: https://sdk.operatorframework.io/docs/advanced-topics/scorecard/ - -[Red Hat Container Certification]: https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/ -[Red Hat Operator Certification]: https://redhat-connect.gitbook.io/certified-operator-guide/ - - - - -## Testing - -### Setup - -```sh -make tools -``` - -### Testing - -```sh -make bundles validate-bundles -``` - -```sh -BUNDLE_DIRECTORY='bundles/community' -BUNDLE_IMAGE='gcr.io/.../postgres-operator-bundle:latest' -INDEX_IMAGE='gcr.io/.../postgres-operator-bundle-index:latest' -NAMESPACE='pgo' - -docker build --tag "$BUNDLE_IMAGE" "$BUNDLE_DIRECTORY" -docker push "$BUNDLE_IMAGE" - -opm index add --bundles "$BUNDLE_IMAGE" --tag "$INDEX_IMAGE" --container-tool=docker -docker push "$INDEX_IMAGE" - -./install.sh operator "$BUNDLE_DIRECTORY" "$INDEX_IMAGE" "$NAMESPACE" "$NAMESPACE" - -# Cleanup -operator-sdk cleanup postgresql --namespace="$NAMESPACE" -kubectl -n "$NAMESPACE" delete operatorgroup olm-operator-group -``` diff --git a/installers/olm/bundle.Dockerfile b/installers/olm/bundle.Dockerfile deleted file mode 100644 index a81d16f73e..0000000000 --- a/installers/olm/bundle.Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# Used to build the bundle image. This file is ignored by the community operator -# registries which work with bundle directories instead. 
-# https://operator-framework.github.io/community-operators/packaging-operator/ - -FROM scratch AS builder - -COPY manifests/ /build/manifests/ -COPY metadata/ /build/metadata/ -COPY tests/ /build/tests - - -FROM scratch - -# ANNOTATIONS is replaced with bundle.annotations.yaml -LABEL \ - ${ANNOTATIONS} - -COPY --from=builder /build/ / diff --git a/installers/olm/bundle.annotations.yaml b/installers/olm/bundle.annotations.yaml deleted file mode 100644 index 20814d454e..0000000000 --- a/installers/olm/bundle.annotations.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -annotations: - # https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/ - # https://docs.openshift.com/container-platform/4.7/operators/understanding/olm-packaging-format.html - operators.operatorframework.io.bundle.mediatype.v1: registry+v1 - operators.operatorframework.io.bundle.manifests.v1: manifests/ - operators.operatorframework.io.bundle.metadata.v1: metadata/ - - operators.operatorframework.io.test.mediatype.v1: scorecard+v1 - operators.operatorframework.io.test.config.v1: tests/scorecard/ - - # "package.v1" is the name of the PackageManifest. It also determines the URL - # of the details page at OperatorHub.io; "postgresql" here becomes: - # https://operatorhub.io/operator/postgresql - # - # A package consists of multiple bundles (versions) arranged into channels. - # https://olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/ - operators.operatorframework.io.bundle.package.v1: '' # generate.sh - - # "channels.v1" is the comma-separated list of channels from which this bundle - # can be installed. - # - # "channel.default.v1" is the default channel of the PackageManifest. It is - # the first channel presented, the first used to satisfy dependencies, and - # the one used by a Subscription that does not specify a channel. OLM uses - # the value from the bundle with the highest semantic version. - # - # https://olm.operatorframework.io/docs/best-practices/channel-naming/ - operators.operatorframework.io.bundle.channels.v1: stable - operators.operatorframework.io.bundle.channel.default.v1: stable - - # OpenShift v4.6 is the first version to support CustomResourceDefinition v1. - # https://github.com/operator-framework/community-operators/blob/8a36a33/docs/packaging-required-criteria-ocp.md - # https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/bundle-directory - com.redhat.delivery.operator.bundle: true - com.redhat.openshift.versions: 'v4.6' - - # https://github.com/opencontainers/image-spec/blob/master/annotations.md - org.opencontainers.image.authors: info@crunchydata.com - org.opencontainers.image.url: https://crunchydata.com - org.opencontainers.image.vendor: Crunchy Data -... 
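For reference, the package and channel annotations above are what the rest of the removed OLM tooling keys on: generate.sh (later in this diff) fills in the per-distribution package name, and install.sh reads the same keys back before creating a Subscription. A minimal, illustrative sketch of inspecting a generated community bundle with the same Python `yq` wrapper those scripts use, assuming `make bundles` has already produced bundles/community:

```shell
# Illustrative only: read the bundle package and channel the way install.sh does.
yq --raw-output '.annotations["operators.operatorframework.io.bundle.package.v1"]' \
    bundles/community/metadata/annotations.yaml   # "postgresql" for the community bundle
yq --raw-output '.annotations["operators.operatorframework.io.bundle.channels.v1"]' \
    bundles/community/metadata/annotations.yaml   # "stable"
```

For the redhat distribution, generate.sh swaps the package name to `crunchy-postgres-operator` while the channel remains `stable`.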
diff --git a/installers/olm/bundle.csv.yaml b/installers/olm/bundle.csv.yaml deleted file mode 100644 index 84431bcc4f..0000000000 --- a/installers/olm/bundle.csv.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# https://olm.operatorframework.io/docs/concepts/crds/clusterserviceversion/ -# https://docs.openshift.com/container-platform/4.7/operators/operator_sdk/osdk-generating-csvs.html -# https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/creating-the-csv -# https://pkg.go.dev/github.com/operator-framework/api@v0.10.1/pkg/operators/v1alpha1#ClusterServiceVersion - -apiVersion: operators.coreos.com/v1alpha1 -kind: ClusterServiceVersion -metadata: - name: '' # generate.sh - annotations: - support: crunchydata.com - olm.properties: '[]' - - # The following affect how the package is indexed at OperatorHub.io: - # https://operatorhub.io/?category=Database - # https://sdk.operatorframework.io/docs/advanced-topics/operator-capabilities/operator-capabilities/ - categories: Database - capabilities: Auto Pilot - description: Production Postgres Made Easy - - # The following appear on the details page at OperatorHub.io: - # https://operatorhub.io/operator/postgresql - createdAt: 2019-12-31 19:40Z - repository: https://github.com/CrunchyData/postgres-operator - containerImage: # kustomize config/operator - alm-examples: |- # kustomize config/examples - -spec: - # The following affect how the package is indexed at OperatorHub.io: - # https://operatorhub.io/ - displayName: Crunchy Postgres for Kubernetes - provider: - # These values become labels on the PackageManifest. - name: Crunchy Data - url: https://www.crunchydata.com/ - keywords: - - postgres - - postgresql - - database - - sql - - operator - - crunchy data - - # The following appear on the details page at OperatorHub.io: - # https://operatorhub.io/operator/postgresql - description: |- # description.md - version: '' # generate.sh - links: - - name: Crunchy Data - url: https://www.crunchydata.com/ - - name: Documentation - url: https://access.crunchydata.com/documentation/postgres-operator/v5/ - maintainers: - - name: Crunchy Data - email: info@crunchydata.com - - # https://olm.operatorframework.io/docs/best-practices/common/ - minKubeVersion: 1.18.0 - maturity: stable - - # https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.18.2/doc/design/building-your-csv.md#your-custom-resource-definitions - customresourcedefinitions: - # The "displayName" and "description" fields appear in the "Custom Resource Definitions" section - # on the details page at OperatorHub.io: https://operatorhub.io/operator/postgresql - # - # The "specDescriptors" and "statusDescriptors" fields appear in the OpenShift Console: - # https://github.com/openshift/console/tree/a8b35e4/frontend/packages/operator-lifecycle-manager/src/components/descriptors - owned: # operator-sdk generate kustomize manifests - - # https://olm.operatorframework.io/docs/advanced-tasks/operator-scoping-with-operatorgroups/ - installModes: - - { type: OwnNamespace, supported: true } - - { type: SingleNamespace, supported: true } - - { type: MultiNamespace, supported: false } - - { type: AllNamespaces, supported: true } - - install: - strategy: deployment - spec: - permissions: # kustomize config/operator - deployments: # kustomize config/operator - - # https://docs.openshift.com/container-platform/4.7/operators/operator_sdk/osdk-generating-csvs.html#olm-enabling-operator-for-restricted-network_osdk-generating-csvs - relatedImages: diff --git 
a/installers/olm/config/examples/kustomization.yaml b/installers/olm/config/examples/kustomization.yaml deleted file mode 100644 index b01a39610a..0000000000 --- a/installers/olm/config/examples/kustomization.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Custom resources that are imported into the ClusterServiceVersion. -# -# The first for each GVK appears in the "Custom Resource Definitions" section on -# the details page at OperatorHub.io: https://operatorhub.io/operator/postgresql -# -# The "metadata.name" fields should be unique so they can be given a description -# that is presented by compatible UIs. -# https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.18.2/doc/design/building-your-csv.md#crd-templates -# -# The "image" fields should be omitted so the defaults are used. -# https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/offline-enabled-operators - -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- postgrescluster.example.yaml diff --git a/installers/olm/config/operator/kustomization.yaml b/installers/olm/config/operator/kustomization.yaml deleted file mode 100644 index dfdce41618..0000000000 --- a/installers/olm/config/operator/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- ../../../../config/default - -patches: -- path: target-namespace.yaml diff --git a/installers/olm/config/operator/target-namespace.yaml b/installers/olm/config/operator/target-namespace.yaml deleted file mode 100644 index d7dbaadeef..0000000000 --- a/installers/olm/config/operator/target-namespace.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pgo -spec: - template: - spec: - containers: - - name: operator - env: - # https://docs.openshift.com/container-platform/4.7/operators/understanding/olm/olm-understanding-operatorgroups.html - - name: PGO_TARGET_NAMESPACE - valueFrom: { fieldRef: { fieldPath: "metadata.annotations['olm.targetNamespaces']" } } diff --git a/installers/olm/config/redhat/related-images.yaml b/installers/olm/config/redhat/related-images.yaml deleted file mode 100644 index 5564ecb718..0000000000 --- a/installers/olm/config/redhat/related-images.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Red Hat Marketplace requires that bundles work offline. OSBS will fill out -# the "spec.relatedImages" field of the ClusterServiceVersion if it is blank. 
-# -# https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/offline-enabled-operators -# https://osbs.readthedocs.io/en/latest/users.html#pinning-pullspecs-for-related-images -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pgo -spec: - template: - spec: - containers: - - name: operator - env: - - { name: RELATED_IMAGE_PGBACKREST, value: 'registry.connect.redhat.com/crunchydata/crunchy-pgbackrest:ubi8-2.33-2' } - - { name: RELATED_IMAGE_PGBOUNCER, value: 'registry.connect.redhat.com/crunchydata/crunchy-pgbouncer:ubi8-1.15-2' } - - { name: RELATED_IMAGE_PGEXPORTER, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-exporter:ubi8-5.0.2-0' } - - - { name: RELATED_IMAGE_POSTGRES_10, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-ha:ubi8-10.18-0' } - - { name: RELATED_IMAGE_POSTGRES_11, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-ha:ubi8-11.13-0' } - - { name: RELATED_IMAGE_POSTGRES_12, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-ha:ubi8-12.8-0' } - - { name: RELATED_IMAGE_POSTGRES_13, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-ha:ubi8-13.4-0' } - - - { name: RELATED_IMAGE_POSTGRES_10_GIS_2.3, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-gis-ha:ubi8-10.18-2.3-0' } - - { name: RELATED_IMAGE_POSTGRES_10_GIS_2.4, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-gis-ha:ubi8-10.18-2.4-0' } - - { name: RELATED_IMAGE_POSTGRES_11_GIS_2.4, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-gis-ha:ubi8-11.13-2.4-0' } - - { name: RELATED_IMAGE_POSTGRES_11_GIS_2.5, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-gis-ha:ubi8-11.13-2.5-0' } - - { name: RELATED_IMAGE_POSTGRES_12_GIS_2.5, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-gis-ha:ubi8-12.8-2.5-0' } - - { name: RELATED_IMAGE_POSTGRES_12_GIS_3.0, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-gis-ha:ubi8-12.8-3.0-0' } - - { name: RELATED_IMAGE_POSTGRES_13_GIS_3.0, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-gis-ha:ubi8-13.4-3.0-0' } - - { name: RELATED_IMAGE_POSTGRES_13_GIS_3.1, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-gis-ha:ubi8-13.4-3.1-0' } diff --git a/installers/olm/description.md b/installers/olm/description.md deleted file mode 100644 index 06a9ac9af0..0000000000 --- a/installers/olm/description.md +++ /dev/null @@ -1,68 +0,0 @@ -[PGO](https://github.com/CrunchyData/postgres-operator), the -[Postgres Operator](https://github.com/CrunchyData/postgres-operator) from -[Crunchy Data](https://www.crunchydata.com), gives you a **declarative Postgres** solution that -automatically manages your [PostgreSQL](https://www.postgresql.org) clusters. - -Designed for your GitOps workflows, it is [easy to get started](https://access.crunchydata.com/documentation/postgres-operator/v5/quickstart/) -with Postgres on Kubernetes with PGO. Within a few moments, you can have a production grade Postgres -cluster complete with high availability, disaster recovery, and monitoring, all over secure TLS communications. -Even better, PGO lets you easily customize your Postgres cluster to tailor it to your workload! - -With conveniences like cloning Postgres clusters to using rolling updates to roll out disruptive -changes with minimal downtime, PGO is ready to support your Postgres data at every stage of your -release pipeline. 
Built for resiliency and uptime, PGO will keep your desired Postgres in a desired -state so you do not need to worry about it. - -PGO is developed with many years of production experience in automating Postgres management on -Kubernetes, providing a seamless cloud native Postgres solution to keep your data always available. - -- **PostgreSQL Cluster Provisioning**: [Create, Scale, & Delete PostgreSQL clusters with ease][provisioning], - while fully customizing your Pods and PostgreSQL configuration! -- **High-Availability**: Safe, automated failover backed by a [distributed consensus based high-availability solution][high-availability]. - Uses [Pod Anti-Affinity][k8s-anti-affinity] to help resiliency; you can configure how aggressive this can be! - Failed primaries automatically heal, allowing for faster recovery time. You can even create regularly scheduled - backups as well and set your backup retention policy -- **Disaster Recovery**: [Backups][backups] and [restores][disaster-recovery] leverage the open source [pgBackRest][] utility and - [includes support for full, incremental, and differential backups as well as efficient delta restores][backups]. - Set how long you want your backups retained for. Works great with very large databases! -- **Monitoring**: [Track the health of your PostgreSQL clusters][monitoring] using the open source [pgMonitor][] library. -- **Clone**: [Create new clusters from your existing clusters or backups][clone] with efficient data cloning. -- **TLS**: All connections are over [TLS][tls]. You can also [bring your own TLS infrastructure][tls] if you do not want to use the provided defaults. -- **Connection Pooling**: Advanced [connection pooling][pool] support using [pgBouncer][]. -- **Affinity and Tolerations**: Have your PostgreSQL clusters deployed to [Kubernetes Nodes][k8s-nodes] of your preference. - Set your [pod anti-affinity][k8s-anti-affinity], node affinity, Pod tolerations and more rules to customize your deployment topology! -- **Full Customizability**: Crunchy PostgreSQL for Kubernetes makes it easy to get your own PostgreSQL-as-a-Service up and running - and fully customize your deployments, including: - - Choose the resources for your Postgres cluster: [container resources and storage size][resize-cluster]. [Resize at any time][resize-cluster] with minimal disruption. - - Use your own container image repository, including support `imagePullSecrets` and private repositories - - [Customize your PostgreSQL configuration][customize-cluster] - -and much more! 
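For reference, a minimal sketch of the declarative workflow described above, using the v1beta1 PostgresCluster API this operator owns. The cluster name, `postgresVersion`, and storage sizes are placeholders, not values taken from this diff.

```shell
# Illustrative only: declare a small Postgres cluster and let the operator reconcile it.
kubectl apply --filename=- <<'EOF'
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo            # placeholder name
spec:
  postgresVersion: 15    # placeholder version
  instances:
    - name: instance1
      dataVolumeClaimSpec:
        accessModes: ["ReadWriteOnce"]
        resources: { requests: { storage: 1Gi } }
  backups:
    pgbackrest:
      repos:
        - name: repo1
          volume:
            volumeClaimSpec:
              accessModes: ["ReadWriteOnce"]
              resources: { requests: { storage: 1Gi } }
EOF
```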
- -[backups]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/backups/ -[clone]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/disaster-recovery/#clone-a-postgres-cluster -[customize-cluster]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/customize-cluster/ -[disaster-recovery]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/disaster-recovery/ -[high-availability]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/high-availability/ -[monitoring]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/monitoring/ -[pool]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/connection-pooling/ -[provisioning]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/create-cluster/ -[resize-cluster]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/resize-cluster/ -[tls]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/customize-cluster/#customize-tls - -[k8s-anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity -[k8s-nodes]: https://kubernetes.io/docs/concepts/architecture/nodes/ - -[pgBackRest]: https://www.pgbackrest.org -[pgBouncer]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/connection-pooling/ -[pgMonitor]: https://github.com/CrunchyData/pgmonitor - - -## Post-Installation - -### Tutorial - -Want to [learn more about the PostgreSQL Operator][tutorial]? Browse through the [tutorial][] to learn more about what you can do! - -[tutorial]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial - diff --git a/installers/olm/generate.sh b/installers/olm/generate.sh deleted file mode 100755 index 0c4dc4c508..0000000000 --- a/installers/olm/generate.sh +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2016 -# vim: set noexpandtab : -set -eu - -DISTRIBUTION="$1" - -cd "${BASH_SOURCE[0]%/*}" - -bundle_directory="bundles/${DISTRIBUTION}" -project_directory="projects/${DISTRIBUTION}" -go_api_directory=$(cd ../../pkg/apis && pwd) - -package_name='postgresql' -case "${DISTRIBUTION}" in - # https://redhat-connect.gitbook.io/certified-operator-guide/appendix/what-if-ive-already-published-a-community-operator - 'redhat') package_name='crunchy-postgres-operator' ;; -esac - -operator_yamls=$(kubectl kustomize "config/${DISTRIBUTION}") -operator_crds=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "CustomResourceDefinition"))') -operator_deployments=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "Deployment"))') -operator_accounts=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "ServiceAccount"))') -operator_roles=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "ClusterRole"))') - -# Recreate the Operator SDK project. -[ ! -d "${project_directory}" ] || rm -r "${project_directory}" -install -d "${project_directory}" -( - cd "${project_directory}" - operator-sdk init --fetch-deps='false' --project-name='postgresoperator' - rm ./*.go go.* - - # Generate CRD descriptions from Go markers. 
- # https://sdk.operatorframework.io/docs/building-operators/golang/references/markers/ - crd_gvks=$(yq <<< "${operator_crds}" 'map({ - group: .spec.group, kind: .spec.names.kind, version: .spec.versions[].name - })') - yq --in-place --yaml-roundtrip --argjson resources "${crd_gvks}" \ - '.multigroup = true | .resources = $resources | .' ./PROJECT - - ln -s "${go_api_directory}" . - operator-sdk generate kustomize manifests --interactive='false' -) - -# Recreate the OLM bundle. -[ ! -d "${bundle_directory}" ] || rm -r "${bundle_directory}" -install -d \ - "${bundle_directory}/manifests" \ - "${bundle_directory}/metadata" \ - "${bundle_directory}/tests/scorecard" \ - -# `echo "${operator_yamls}" | operator-sdk generate bundle` includes the ServiceAccount which cannot -# be upgraded: https://github.com/operator-framework/operator-lifecycle-manager/issues/2193 - -# Include Operator SDK scorecard tests. -# https://sdk.operatorframework.io/docs/advanced-topics/scorecard/scorecard/ -kubectl kustomize "${project_directory}/config/scorecard" \ - > "${bundle_directory}/tests/scorecard/config.yaml" - -# Render bundle annotations and strip comments. -yq --yaml-roundtrip < bundle.annotations.yaml > "${bundle_directory}/metadata/annotations.yaml" \ - --arg package "${package_name}" \ -' - .annotations["operators.operatorframework.io.bundle.package.v1"] = $package | -.' - -# Copy annotations into Dockerfile LABELs. -labels=$(yq --raw-output < "${bundle_directory}/metadata/annotations.yaml" \ - '.annotations | to_entries | map(.key +"="+ (.value | tojson)) | join(" \\\n\t")') -ANNOTATIONS="${labels}" envsubst '$ANNOTATIONS' < bundle.Dockerfile > "${bundle_directory}/Dockerfile" - -# Include CRDs as manifests. -crd_names=$(yq --raw-output <<< "${operator_crds}" 'to_entries[] | [.key, .value.metadata.name] | @tsv') -while IFS=$'\t' read -r index name; do - yq --yaml-roundtrip <<< "${operator_crds}" ".[${index}]" > "${bundle_directory}/manifests/${name}.crd.yaml" -done <<< "${crd_names}" - - -abort() { echo >&2 "$@"; exit 1; } -dump() { yq --color-output; } - -yq > /dev/null <<< "${operator_deployments}" --exit-status 'length == 1' || - abort "too many deployments!" $'\n'"$(dump <<< "${operator_deployments}")" - -yq > /dev/null <<< "${operator_accounts}" --exit-status 'length == 1' || - abort "too many service accounts!" $'\n'"$(dump <<< "${operator_accounts}")" - -yq > /dev/null <<< "${operator_roles}" --exit-status 'length == 1' || - abort "too many roles!" $'\n'"$(dump <<< "${operator_roles}")" - -# Render bundle CSV and strip comments. 
- -csv_stem=$(yq --raw-output '.projectName' "${project_directory}/PROJECT") -crd_descriptions=$(yq '.spec.customresourcedefinitions.owned' \ - "${project_directory}/config/manifests/bases/${csv_stem}.clusterserviceversion.yaml") - -crd_gvks=$(yq <<< "${operator_crds}" 'map({ - group: .spec.group, kind: .spec.names.kind, version: .spec.versions[].name -} | { - apiVersion: "\(.group)/\(.version)", kind -})') -crd_examples=$(yq <<< "${operator_yamls}" --slurp --argjson gvks "${crd_gvks}" 'map(select( - IN({ apiVersion, kind }; $gvks | .[]) -))') - -yq --yaml-roundtrip < bundle.csv.yaml > "${bundle_directory}/manifests/${csv_stem}.clusterserviceversion.yaml" \ - --argjson deployment "$(yq <<< "${operator_deployments}" 'first')" \ - --argjson account "$(yq <<< "${operator_accounts}" 'first | .metadata.name')" \ - --argjson rules "$(yq <<< "${operator_roles}" 'first | .rules')" \ - --argjson crds "${crd_descriptions}" \ - --arg examples "${crd_examples}" \ - --arg version "${PGO_VERSION}" \ - --arg description "$(< description.md)" \ - --arg icon "$(base64 ../seal.svg | tr -d '\n')" \ - --arg stem "${csv_stem}" \ -' - .metadata.annotations["alm-examples"] = $examples | - .metadata.annotations["containerImage"] = ($deployment.spec.template.spec.containers[0].image) | - - .metadata.name = "\($stem).v\($version)" | - .spec.version = $version | - - .spec.customresourcedefinitions.owned = $crds | - .spec.description = $description | - .spec.icon = [{ mediatype: "image/svg+xml", base64data: $icon }] | - - .spec.install.spec.permissions = [{ serviceAccountName: $account, rules: $rules }] | - .spec.install.spec.deployments = [( $deployment | { name: .metadata.name, spec } )] | -.' - -case "${DISTRIBUTION}" in - 'redhat') - # https://redhat-connect.gitbook.io/certified-operator-guide/appendix/what-if-ive-already-published-a-community-operator - yq --in-place --yaml-roundtrip \ - ' - .metadata.annotations.certified = "true" | - .' \ - "${bundle_directory}/manifests/${csv_stem}.clusterserviceversion.yaml" - ;; -esac - -if > /dev/null command -v tree; then tree -C "${bundle_directory}"; fi diff --git a/installers/olm/install.sh b/installers/olm/install.sh deleted file mode 100755 index 2c4f6ce190..0000000000 --- a/installers/olm/install.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -if command -v oc >/dev/null; then - kubectl() { oc "$@"; } - kubectl version -else - kubectl version --short -fi - -catalog_source() ( - source_namespace="$1" - source_name="$2" - index_image="$3" - - kc() { kubectl --namespace="$source_namespace" "$@"; } - kc get namespace "$source_namespace" --output=jsonpath='{""}' 2>/dev/null || - kc create namespace "$source_namespace" - - # See https://godoc.org/github.com/operator-framework/api/pkg/operators/v1alpha1#CatalogSource - source_json=$(jq --null-input \ - --arg name "${source_name}" \ - --arg image "${index_image}" \ - '{ - apiVersion: "operators.coreos.com/v1alpha1", kind: "CatalogSource", - metadata: { name: $name }, - spec: { - displayName: "Test Registry", - sourceType: "grpc", image: $image - } - }') - kc create --filename=- <<< "$source_json" - - # Wait for Pod to exist and be healthy. - for _ in $(seq 10); do - [ '[]' != "$( kc get pod --selector="olm.catalogSource=${source_name}" --output=jsonpath='{.items}' )" ] && - break || sleep 1s - done - if ! 
kc wait --for='condition=ready' --timeout='30s' pod --selector="olm.catalogSource=${source_name}"; then - kc logs --previous --tail='-1' --selector="olm.catalogSource=${source_name}" - fi -) - -operator_group() ( - group_namespace="$1" - group_name="$2" - target_namespaces=("${@:3}") - - kc() { kubectl --namespace="$group_namespace" "$@"; } - kc get namespace "$group_namespace" --output=jsonpath='{""}' 2>/dev/null || - kc create namespace "$group_namespace" - - group_json="$( jq <<< '{}' --arg name "$group_name" '{ - apiVersion: "operators.coreos.com/v1", kind: "OperatorGroup", - metadata: { "name": $name }, - spec: { targetNamespaces: [] } - }' )" - - for ns in "${target_namespaces[@]}"; do - group_json="$( jq <<< "$group_json" --arg namespace "$ns" '.spec.targetNamespaces += [ $namespace ]' )" - done - - kc create --filename=- <<< "$group_json" -) - -operator() ( - bundle_directory="$1" index_image="$2" - operator_namespace="$3" - target_namespaces=("${@:4}") - - package_name=$(yq \ - --raw-output '.annotations["operators.operatorframework.io.bundle.package.v1"]' \ - "${bundle_directory}"/*/annotations.yaml) - channel_name=$(yq \ - --raw-output '.annotations["operators.operatorframework.io.bundle.channels.v1"]' \ - "${bundle_directory}"/*/annotations.yaml) - csv_name=$(yq --raw-output '.metadata.name' \ - "${bundle_directory}"/*/*.clusterserviceversion.yaml) - - kc() { kubectl --namespace="$operator_namespace" "$@"; } - - catalog_source "$operator_namespace" olm-catalog-source "${index_image}" - operator_group "$operator_namespace" olm-operator-group "${target_namespaces[@]}" - - # Create a Subscription to install the operator. - # See https://godoc.org/github.com/operator-framework/api/pkg/operators/v1alpha1#Subscription - subscription_json=$(jq --null-input \ - --arg channel "$channel_name" \ - --arg namespace "$operator_namespace" \ - --arg package "$package_name" \ - --arg version "$csv_name" \ - '{ - apiVersion: "operators.coreos.com/v1alpha1", kind: "Subscription", - metadata: { name: $package }, - spec: { - name: $package, - sourceNamespace: $namespace, - source: "olm-catalog-source", - startingCSV: $version, - channel: $channel - } - }') - kc create --filename=- <<< "$subscription_json" - - # Wait for the InstallPlan to exist and be healthy. - for _ in $(seq 10); do - [ '[]' != "$( kc get installplan --output=jsonpath="{.items}" )" ] && - break || sleep 1s - done - if ! kc wait --for='condition=installed' --timeout='30s' installplan --all; then - subscription_uid="$( kc get subscription "$package_name" --output=jsonpath='{.metadata.uid}' )" - installplan_json="$( kc get installplan --output=json )" - - jq <<< "$installplan_json" --arg uid "$subscription_uid" \ - '.items[] | select(.metadata.ownerReferences[] | select(.uid == $uid)).status.conditions' - exit 1 - fi - - # Wait for Deployment to exist and be healthy. - for _ in $(seq 10); do - [ '[]' != "$( kc get deploy --selector="olm.owner=$csv_name" --output=jsonpath='{.items}' )" ] && - break || sleep 1s - done - if ! 
kc wait --for='condition=available' --timeout='30s' deploy --selector="olm.owner=$csv_name"; then - kc describe pod --selector="olm.owner=$csv_name" - - crashed_containers="$( kc get pod --selector="olm.owner=$csv_name" --output=json )" - crashed_containers="$( jq <<< "$crashed_containers" --raw-output \ - '.items[] | { - pod: .metadata.name, - container: .status.containerStatuses[] | select(.restartCount > 0).name - } | [.pod, .container] | @tsv' )" - - test -z "$crashed_containers" || while IFS=$'\t' read -r pod container; do - echo; echo "$pod/$container" restarted: - kc logs --container="$container" --previous --tail='-1' "pod/$pod" - done <<< "$crashed_containers" - - exit 1 - fi -) - -"$@" diff --git a/installers/olm/validate-directory.sh b/installers/olm/validate-directory.sh deleted file mode 100755 index 726f64946e..0000000000 --- a/installers/olm/validate-directory.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -if command -v oc > /dev/null; then - kubectl() { oc "$@"; } - kubectl version -else - kubectl version --short -fi - -push_trap_exit() { - local -a array - eval "array=($(trap -p EXIT))" - # shellcheck disable=SC2064 - trap "$1;${array[2]-}" EXIT -} - -validate_bundle_directory() { - local directory="$1" - local namespace - - namespace=$(kubectl create --filename=- --output='go-template={{.metadata.name}}' <<< '{ - "apiVersion": "v1", "kind": "Namespace", - "metadata": { - "generateName": "olm-test-", - "labels": { "olm-test": "bundle-directory" } - } - }') - echo 'namespace "'"${namespace}"'" created' - push_trap_exit "kubectl delete namespace '${namespace}'" - - # https://olm.operatorframework.io/docs/best-practices/common/ - # https://sdk.operatorframework.io/docs/advanced-topics/scorecard/scorecard/ - operator-sdk scorecard --namespace="${namespace}" "${directory}" -} - -validate_bundle_directory "$@" diff --git a/installers/olm/validate-image.sh b/installers/olm/validate-image.sh deleted file mode 100755 index 1db6ec5263..0000000000 --- a/installers/olm/validate-image.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -push_trap_exit() { - local -a array - eval "array=($(trap -p EXIT))" - # shellcheck disable=SC2064 - trap "$1;${array[2]-}" EXIT -} - -# Store anything in a single temporary directory that gets cleaned up. -TMPDIR=$(mktemp -d) -push_trap_exit "rm -rf '${TMPDIR}'" -export TMPDIR - -validate_bundle_image() { - local container="$1" directory="$2" - directory=$(cd "${directory}" && pwd) - - cat > "${TMPDIR}/registry.config" <<-SSL - [req] - distinguished_name = req_distinguished_name - x509_extensions = v3_ext - prompt = no - [req_distinguished_name] - commonName = localhost - [v3_ext] - subjectAltName = @alt_names - [alt_names] - DNS.1 = localhost - SSL - - openssl ecparam -name prime256v1 -genkey -out "${TMPDIR}/registry.key" - openssl req -new -x509 -days 1 \ - -config "${TMPDIR}/registry.config" \ - -key "${TMPDIR}/registry.key" \ - -out "${TMPDIR}/registry.crt" - - # Start a local image registry. 
- local image port registry - registry=$(${container} run --detach --publish-all \ - --env='REGISTRY_HTTP_TLS_CERTIFICATE=/mnt/registry.crt' \ - --env='REGISTRY_HTTP_TLS_KEY=/mnt/registry.key' \ - --volume="${TMPDIR}:/mnt" \ - docker.io/library/registry:latest) - # https://github.com/containers/podman/issues/8524 - push_trap_exit "echo -n 'Removing '; ${container} rm '${registry}'" - push_trap_exit "echo -n 'Stopping '; ${container} stop '${registry}'" - - port=$(${container} inspect "${registry}" \ - --format='{{ (index .NetworkSettings.Ports "5000/tcp" 0).HostPort }}') - image="localhost:${port}/postgres-operator-bundle:latest" - - cat > "${TMPDIR}/registries.conf" <<-TOML - [[registry]] - location = "localhost:${port}" - insecure = true - TOML - - # Build the bundle image and push it to the local registry. - ${container} run --rm \ - --device='/dev/fuse:rw' --network='host' --security-opt='seccomp=unconfined' \ - --volume="${TMPDIR}/registries.conf:/etc/containers/registries.conf.d/localhost.conf:ro" \ - --volume="${directory}:/mnt:delegated" \ - --workdir='/mnt' \ - quay.io/buildah/stable:latest \ - buildah build-using-dockerfile \ - --format='docker' --layers --tag="docker://${image}" - - local -a opm - local opm_version - opm_version=$(opm version) - opm_version=$(sed -n 's#.*OpmVersion:"\([^"]*\)".*#\1# p' <<< "${opm_version}") - # shellcheck disable=SC2206 - opm=(${container} run --rm - --network='host' - --volume="${TMPDIR}/registry.crt:/usr/local/share/ca-certificates/registry.crt:ro" - --volume="${TMPDIR}:/mnt:delegated" - --workdir='/mnt' - quay.io/operator-framework/upstream-opm-builder:"${opm_version}" - sh -ceu 'update-ca-certificates && exec "$@"' - opm) - - # Validate the bundle image in the local registry. - # https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/#validating-your-bundle - "${opm[@]}" alpha bundle validate --image-builder='none' \ - --optional-validators='operatorhub,bundle-objects' \ - --tag="${image}" - - # Create an index database from the bundle image. - "${opm[@]}" index add --bundles="${image}" --generate - - # drwxr-xr-x. 2 user user 22 database - # -rw-r--r--. 1 user user 286720 database/index.db - # -rw-r--r--. 1 user user 267 index.Dockerfile -} - -validate_bundle_image "$@" diff --git a/installers/seal.svg b/installers/seal.svg deleted file mode 100644 index 686d0c974d..0000000000 --- a/installers/seal.svg +++ /dev/null @@ -1,131 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/internal/bridge/client.go b/internal/bridge/client.go new file mode 100644 index 0000000000..d5ad8470f7 --- /dev/null +++ b/internal/bridge/client.go @@ -0,0 +1,819 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" + + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +const defaultAPI = "https://api.crunchybridge.com" + +var errAuthentication = errors.New("authentication failed") + +type ClientInterface interface { + ListClusters(ctx context.Context, apiKey, teamId string) ([]*ClusterApiResource, error) + CreateCluster(ctx context.Context, apiKey string, clusterRequestPayload *PostClustersRequestPayload) (*ClusterApiResource, error) + DeleteCluster(ctx context.Context, apiKey, id string) (*ClusterApiResource, bool, error) + GetCluster(ctx context.Context, apiKey, id string) (*ClusterApiResource, error) + GetClusterStatus(ctx context.Context, apiKey, id string) (*ClusterStatusApiResource, error) + GetClusterUpgrade(ctx context.Context, apiKey, id string) (*ClusterUpgradeApiResource, error) + UpgradeCluster(ctx context.Context, apiKey, id string, clusterRequestPayload *PostClustersUpgradeRequestPayload) (*ClusterUpgradeApiResource, error) + UpgradeClusterHA(ctx context.Context, apiKey, id, action string) (*ClusterUpgradeApiResource, error) + UpdateCluster(ctx context.Context, apiKey, id string, clusterRequestPayload *PatchClustersRequestPayload) (*ClusterApiResource, error) + GetClusterRole(ctx context.Context, apiKey, clusterId, roleName string) (*ClusterRoleApiResource, error) +} + +type Client struct { + http.Client + wait.Backoff + + BaseURL url.URL + Version string +} + +// BRIDGE API RESPONSE OBJECTS + +// ClusterApiResource is used to hold cluster information received in Bridge API response. 
+type ClusterApiResource struct { + ID string `json:"id,omitempty"` + ClusterGroup *ClusterGroupApiResource `json:"cluster_group,omitempty"` + PrimaryClusterID string `json:"cluster_id,omitempty"` + CPU int64 `json:"cpu,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + DiskUsage *ClusterDiskUsageApiResource `json:"disk_usage,omitempty"` + Environment string `json:"environment,omitempty"` + Host string `json:"host,omitempty"` + IsHA *bool `json:"is_ha,omitempty"` + IsProtected *bool `json:"is_protected,omitempty"` + IsSuspended *bool `json:"is_suspended,omitempty"` + Keychain string `json:"keychain_id,omitempty"` + MaintenanceWindowStart int64 `json:"maintenance_window_start,omitempty"` + MajorVersion int `json:"major_version,omitempty"` + Memory float64 `json:"memory,omitempty"` + ClusterName string `json:"name,omitempty"` + Network string `json:"network_id,omitempty"` + Parent string `json:"parent_id,omitempty"` + Plan string `json:"plan_id,omitempty"` + PostgresVersion intstr.IntOrString `json:"postgres_version_id,omitempty"` + Provider string `json:"provider_id,omitempty"` + Region string `json:"region_id,omitempty"` + Replicas []*ClusterApiResource `json:"replicas,omitempty"` + Storage int64 `json:"storage,omitempty"` + Tailscale *bool `json:"tailscale_active,omitempty"` + Team string `json:"team_id,omitempty"` + LastUpdate string `json:"updated_at,omitempty"` + ResponsePayload v1beta1.SchemalessObject `json:""` +} + +func (c *ClusterApiResource) AddDataToClusterStatus(cluster *v1beta1.CrunchyBridgeCluster) { + cluster.Status.ClusterName = c.ClusterName + cluster.Status.Host = c.Host + cluster.Status.ID = c.ID + cluster.Status.IsHA = c.IsHA + cluster.Status.IsProtected = c.IsProtected + cluster.Status.MajorVersion = c.MajorVersion + cluster.Status.Plan = c.Plan + cluster.Status.Storage = FromGibibytes(c.Storage) + cluster.Status.Responses.Cluster = c.ResponsePayload +} + +type ClusterList struct { + Clusters []*ClusterApiResource `json:"clusters"` +} + +// ClusterDiskUsageApiResource hold information on disk usage for a particular cluster. 
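As an editorial aside (not part of the diff): the `AddDataToClusterStatus` helpers above copy selected fields from a Bridge API response onto a `CrunchyBridgeCluster` status while keeping the raw payload in `ResponsePayload`. A minimal sketch of how a caller inside this module might decode a response body and apply it; the JSON values are hypothetical, and the import works only from within this repository because the package is internal.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/crunchydata/postgres-operator/internal/bridge"
	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

func main() {
	// A trimmed body in the shape returned by the Bridge clusters endpoints
	// (field values here are hypothetical).
	body := []byte(`{"id":"abc123","name":"hippo","major_version":15,"host":"p.example.net"}`)

	cluster := &bridge.ClusterApiResource{}
	if err := json.Unmarshal(body, cluster); err != nil {
		log.Fatal(err)
	}
	// Keep the raw payload alongside the typed fields, as the client methods do.
	if err := json.Unmarshal(body, &cluster.ResponsePayload); err != nil {
		log.Fatal(err)
	}

	// Copy the relevant pieces onto a CrunchyBridgeCluster status.
	crunchybridgecluster := &v1beta1.CrunchyBridgeCluster{}
	cluster.AddDataToClusterStatus(crunchybridgecluster)

	fmt.Println(crunchybridgecluster.Status.ID, crunchybridgecluster.Status.MajorVersion)
}
```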
+type ClusterDiskUsageApiResource struct { + DiskAvailableMB int64 `json:"disk_available_mb,omitempty"` + DiskTotalSizeMB int64 `json:"disk_total_size_mb,omitempty"` + DiskUsedMB int64 `json:"disk_used_mb,omitempty"` +} + +// ClusterGroupApiResource holds information on a ClusterGroup +type ClusterGroupApiResource struct { + ID string `json:"id,omitempty"` + Clusters []*ClusterApiResource `json:"clusters,omitempty"` + Kind string `json:"kind,omitempty"` + Name string `json:"name,omitempty"` + Network string `json:"network_id,omitempty"` + Provider string `json:"provider_id,omitempty"` + Region string `json:"region_id,omitempty"` + Team string `json:"team_id,omitempty"` +} + +type ClusterStatusApiResource struct { + DiskUsage *ClusterDiskUsageApiResource `json:"disk_usage,omitempty"` + OldestBackup string `json:"oldest_backup_at,omitempty"` + OngoingUpgrade *ClusterUpgradeApiResource `json:"ongoing_upgrade,omitempty"` + State string `json:"state,omitempty"` + ResponsePayload v1beta1.SchemalessObject `json:""` +} + +func (c *ClusterStatusApiResource) AddDataToClusterStatus(cluster *v1beta1.CrunchyBridgeCluster) { + cluster.Status.State = c.State + cluster.Status.Responses.Status = c.ResponsePayload +} + +type ClusterUpgradeApiResource struct { + ClusterID string `json:"cluster_id,omitempty"` + Operations []*v1beta1.UpgradeOperation `json:"operations,omitempty"` + Team string `json:"team_id,omitempty"` + ResponsePayload v1beta1.SchemalessObject `json:""` +} + +func (c *ClusterUpgradeApiResource) AddDataToClusterStatus(cluster *v1beta1.CrunchyBridgeCluster) { + cluster.Status.OngoingUpgrade = c.Operations + cluster.Status.Responses.Upgrade = c.ResponsePayload +} + +type ClusterUpgradeOperationApiResource struct { + Flavor string `json:"flavor,omitempty"` + StartingFrom string `json:"starting_from,omitempty"` + State string `json:"state,omitempty"` +} + +// ClusterRoleApiResource is used for retrieving details on ClusterRole from the Bridge API +type ClusterRoleApiResource struct { + AccountEmail string `json:"account_email"` + AccountId string `json:"account_id"` + ClusterId string `json:"cluster_id"` + Flavor string `json:"flavor"` + Name string `json:"name"` + Password string `json:"password"` + Team string `json:"team_id"` + URI string `json:"uri"` +} + +// ClusterRoleList holds a slice of ClusterRoleApiResource +type ClusterRoleList struct { + Roles []*ClusterRoleApiResource `json:"roles"` +} + +// BRIDGE API REQUEST PAYLOADS + +// PatchClustersRequestPayload is used for updating various properties of an existing cluster. +type PatchClustersRequestPayload struct { + ClusterGroup string `json:"cluster_group_id,omitempty"` + // DashboardSettings *ClusterDashboardSettings `json:"dashboard_settings,omitempty"` + // TODO (dsessler7): Find docs for DashboardSettings and create appropriate struct + Environment string `json:"environment,omitempty"` + IsProtected *bool `json:"is_protected,omitempty"` + MaintenanceWindowStart int64 `json:"maintenance_window_start,omitempty"` + Name string `json:"name,omitempty"` +} + +// PostClustersRequestPayload is used for creating a new cluster. 
+type PostClustersRequestPayload struct { + Name string `json:"name"` + Plan string `json:"plan_id"` + Team string `json:"team_id"` + ClusterGroup string `json:"cluster_group_id,omitempty"` + Environment string `json:"environment,omitempty"` + IsHA bool `json:"is_ha,omitempty"` + Keychain string `json:"keychain_id,omitempty"` + Network string `json:"network_id,omitempty"` + PostgresVersion intstr.IntOrString `json:"postgres_version_id,omitempty"` + Provider string `json:"provider_id,omitempty"` + Region string `json:"region_id,omitempty"` + Storage int64 `json:"storage,omitempty"` +} + +// PostClustersUpgradeRequestPayload is used for creating a new cluster upgrade which may include +// changing its plan, upgrading its major version, or increasing its storage size. +type PostClustersUpgradeRequestPayload struct { + Plan string `json:"plan_id,omitempty"` + PostgresVersion intstr.IntOrString `json:"postgres_version_id,omitempty"` + UpgradeStartTime string `json:"starting_from,omitempty"` + Storage int64 `json:"storage,omitempty"` +} + +// PutClustersUpgradeRequestPayload is used for updating an ongoing or scheduled upgrade. +// TODO: Implement the ability to update an upgrade (this isn't currently being used) +type PutClustersUpgradeRequestPayload struct { + Plan string `json:"plan_id,omitempty"` + PostgresVersion intstr.IntOrString `json:"postgres_version_id,omitempty"` + UpgradeStartTime string `json:"starting_from,omitempty"` + Storage int64 `json:"storage,omitempty"` + UseMaintenanceWindow *bool `json:"use_cluster_maintenance_window,omitempty"` +} + +// BRIDGE CLIENT FUNCTIONS AND METHODS + +// NewClient creates a Client with backoff settings that amount to +// ~10 attempts over ~2 minutes. A default is used when apiURL is not +// an acceptable URL. +func NewClient(apiURL, version string) *Client { + // Use the default URL when the argument (1) does not parse at all, or + // (2) has the wrong scheme, or (3) has no hostname. + base, err := url.Parse(apiURL) + if err != nil || (base.Scheme != "http" && base.Scheme != "https") || base.Hostname() == "" { + base, _ = url.Parse(defaultAPI) + } + + return &Client{ + Backoff: wait.Backoff{ + Duration: time.Second, + Factor: 1.6, + Jitter: 0.2, + Steps: 10, + Cap: time.Minute, + }, + BaseURL: *base, + Version: version, + } +} + +// doWithBackoff performs HTTP requests until: +// 1. ctx is cancelled, +// 2. the server returns a status code below 500, "Internal Server Error", or +// 3. the backoff is exhausted. +// +// Be sure to close the [http.Response] Body when the returned error is nil. +// See [http.Client.Do] for more details. +func (c *Client) doWithBackoff( + ctx context.Context, method, path string, params url.Values, body []byte, headers http.Header, +) ( + *http.Response, error, +) { + var response *http.Response + + // Prepare a copy of the passed in headers so we can manipulate them. + if headers = headers.Clone(); headers == nil { + headers = make(http.Header) + } + + // Send a value that identifies this PATCH or POST request so it is safe to + // retry when the server does not respond. 
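An editorial note on the constructor above: `NewClient` silently falls back to the public API when the URL argument is unusable, and the returned `Client` embeds a `wait.Backoff` that callers can tune directly (the unit tests later in this diff do exactly that). A small sketch, not from the diff, assuming it runs inside this module and using a hypothetical version string.

```go
package main

import (
	"fmt"
	"time"

	"github.com/crunchydata/postgres-operator/internal/bridge"
)

func main() {
	// An empty or unusable URL falls back to the public API,
	// https://api.crunchybridge.com.
	client := bridge.NewClient("", "5.x.y") // version string is hypothetical
	fmt.Println(client.BaseURL.String())

	// The embedded wait.Backoff can be tuned by callers, as the tests below
	// do with Duration and Steps.
	client.Backoff.Duration = 500 * time.Millisecond
	client.Backoff.Steps = 3
}
```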
+ // - https://docs.crunchybridge.com/api-concepts/idempotency/ + if method == http.MethodPatch || method == http.MethodPost { + headers.Set("Idempotency-Key", string(uuid.NewUUID())) + } + + headers.Set("User-Agent", "PGO/"+c.Version) + url := c.BaseURL.JoinPath(path) + if params != nil { + url.RawQuery = params.Encode() + } + urlString := url.String() + + err := wait.ExponentialBackoff(c.Backoff, func() (bool, error) { + // NOTE: The [net/http] package treats an empty [bytes.Reader] the same as nil. + request, err := http.NewRequestWithContext(ctx, method, urlString, bytes.NewReader(body)) + + if err == nil { + request.Header = headers.Clone() + + //nolint:bodyclose // This response is returned to the caller. + response, err = c.Client.Do(request) + } + + // An error indicates there was no response from the server, and the + // request may not have finished. The "Idempotency-Key" header above + // makes it safe to retry in this case. + finished := err == nil + + // When the request finishes with a server error, discard the body and retry. + // - https://docs.crunchybridge.com/api-concepts/getting-started/#status-codes + if finished && response.StatusCode >= 500 { + _ = response.Body.Close() + finished = false + } + + // Stop when the context is cancelled. + return finished, ctx.Err() + }) + + // Discard the response body when there is a timeout from backoff. + if response != nil && err != nil { + _ = response.Body.Close() + } + + // Return the last response, if any. + // Return the cancellation or timeout from backoff, if any. + return response, err +} + +// doWithRetry performs HTTP requests until: +// 1. ctx is cancelled, +// 2. the server returns a status code below 500, "Internal Server Error", +// that is not 429, "Too many requests", or +// 3. the backoff is exhausted. +// +// Be sure to close the [http.Response] Body when the returned error is nil. +// See [http.Client.Do] for more details. +func (c *Client) doWithRetry( + ctx context.Context, method, path string, params url.Values, body []byte, headers http.Header, +) ( + *http.Response, error, +) { + response, err := c.doWithBackoff(ctx, method, path, params, body, headers) + + // Retry the request when the server responds with "Too many requests". + // - https://docs.crunchybridge.com/api-concepts/getting-started/#status-codes + // - https://docs.crunchybridge.com/api-concepts/getting-started/#rate-limiting + for err == nil && response.StatusCode == 429 { + seconds, _ := strconv.Atoi(response.Header.Get("Retry-After")) + + // Only retry when the response indicates how long to wait. + if seconds <= 0 { + break + } + + // Discard the "Too many requests" response body, and retry. + _ = response.Body.Close() + + // Create a channel that sends after the delay indicated by the API. + timer := time.NewTimer(time.Duration(seconds) * time.Second) + defer timer.Stop() + + // Wait for the delay or context cancellation, whichever comes first. + select { + case <-timer.C: + // Try the request again. Check it in the loop condition. + response, err = c.doWithBackoff(ctx, method, path, params, body, headers) + timer.Stop() + + case <-ctx.Done(): + // Exit the loop and return the context cancellation. 
+ err = ctx.Err() + } + } + + return response, err +} + +func (c *Client) CreateAuthObject(ctx context.Context, authn AuthObject) (AuthObject, error) { + var result AuthObject + + response, err := c.doWithRetry(ctx, "POST", "/vendor/operator/auth-objects", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + authn.Secret}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + // 401, Unauthorized + case response.StatusCode == 401: + err = fmt.Errorf("%w: %s", errAuthentication, body) + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +func (c *Client) CreateInstallation(ctx context.Context) (Installation, error) { + var result Installation + + response, err := c.doWithRetry(ctx, "POST", "/vendor/operator/installations", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// CRUNCHYBRIDGECLUSTER CRUD METHODS + +// ListClusters makes a GET request to the "/clusters" endpoint to retrieve a list of all clusters +// in Bridge that are owned by the team specified by the provided team id. +func (c *Client) ListClusters(ctx context.Context, apiKey, teamId string) ([]*ClusterApiResource, error) { + result := &ClusterList{} + + params := url.Values{} + if len(teamId) > 0 { + params.Add("team_id", teamId) + } + response, err := c.doWithRetry(ctx, "GET", "/clusters", params, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result.Clusters, err +} + +// CreateCluster makes a POST request to the "/clusters" endpoint thereby creating a cluster +// in Bridge with the settings specified in the request payload. 
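Before the CreateCluster implementation that follows, a hedged usage sketch (editorial, not part of the diff) of the list/create flow. The API key, team id, cluster name, and plan id are hypothetical, and the snippet assumes it is compiled inside this module since `internal/bridge` cannot be imported from outside it.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"k8s.io/apimachinery/pkg/util/intstr"

	"github.com/crunchydata/postgres-operator/internal/bridge"
)

func main() {
	ctx := context.Background()
	client := bridge.NewClient("", "5.x.y")

	apiKey := "example-api-key" // hypothetical credentials
	teamID := "example-team-id" // hypothetical team id

	// List every cluster the team owns.
	clusters, err := client.ListClusters(ctx, apiKey, teamID)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range clusters {
		fmt.Println(c.ID, c.ClusterName)
	}

	// Create a new cluster; all values below are hypothetical.
	created, err := client.CreateCluster(ctx, apiKey, &bridge.PostClustersRequestPayload{
		Name:            "hippo",
		Plan:            "standard-8",
		Team:            teamID,
		IsHA:            false,
		PostgresVersion: intstr.FromInt(15),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created cluster", created.ID)
}
```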
+func (c *Client) CreateCluster( + ctx context.Context, apiKey string, clusterRequestPayload *PostClustersRequestPayload, +) (*ClusterApiResource, error) { + result := &ClusterApiResource{} + + clusterbyte, err := json.Marshal(clusterRequestPayload) + if err != nil { + return result, err + } + + response, err := c.doWithRetry(ctx, "POST", "/clusters", nil, clusterbyte, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// DeleteCluster calls the delete endpoint, returning +// +// the cluster, +// whether the cluster is deleted already, +// and an error. +func (c *Client) DeleteCluster(ctx context.Context, apiKey, id string) (*ClusterApiResource, bool, error) { + result := &ClusterApiResource{} + var deletedAlready bool + + response, err := c.doWithRetry(ctx, "DELETE", "/clusters/"+id, nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + // Already deleted + // Bridge API returns 410 Gone for previously deleted clusters + // --https://docs.crunchybridge.com/api-concepts/idempotency#delete-semantics + // But also, if we can't find it... + // Maybe if no ID we return already deleted? + case response.StatusCode == 410: + fallthrough + case response.StatusCode == 404: + deletedAlready = true + err = nil + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, deletedAlready, err +} + +// GetCluster makes a GET request to the "/clusters/" endpoint, thereby retrieving details +// for a given cluster in Bridge specified by the provided cluster id. +func (c *Client) GetCluster(ctx context.Context, apiKey, id string) (*ClusterApiResource, error) { + result := &ClusterApiResource{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+id, nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. 
+ err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// GetClusterStatus makes a GET request to the "/clusters//status" endpoint, thereby retrieving details +// for a given cluster's status in Bridge, specified by the provided cluster id. +func (c *Client) GetClusterStatus(ctx context.Context, apiKey, id string) (*ClusterStatusApiResource, error) { + result := &ClusterStatusApiResource{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+id+"/status", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// GetClusterUpgrade makes a GET request to the "/clusters//upgrade" endpoint, thereby retrieving details +// for a given cluster's upgrade status in Bridge, specified by the provided cluster id. +func (c *Client) GetClusterUpgrade(ctx context.Context, apiKey, id string) (*ClusterUpgradeApiResource, error) { + result := &ClusterUpgradeApiResource{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+id+"/upgrade", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// UpgradeCluster makes a POST request to the "/clusters//upgrade" endpoint, thereby attempting +// to upgrade certain settings for a given cluster in Bridge. +func (c *Client) UpgradeCluster( + ctx context.Context, apiKey, id string, clusterRequestPayload *PostClustersUpgradeRequestPayload, +) (*ClusterUpgradeApiResource, error) { + result := &ClusterUpgradeApiResource{} + + clusterbyte, err := json.Marshal(clusterRequestPayload) + if err != nil { + return result, err + } + + response, err := c.doWithRetry(ctx, "POST", "/clusters/"+id+"/upgrade", nil, clusterbyte, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. 
+ err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// UpgradeClusterHA makes a PUT request to the "/clusters//actions/" endpoint, +// where is either "enable-ha" or "disable-ha", thereby attempting to change the +// HA setting for a given cluster in Bridge. +func (c *Client) UpgradeClusterHA(ctx context.Context, apiKey, id, action string) (*ClusterUpgradeApiResource, error) { + result := &ClusterUpgradeApiResource{} + + response, err := c.doWithRetry(ctx, "PUT", "/clusters/"+id+"/actions/"+action, nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// UpdateCluster makes a PATCH request to the "/clusters/" endpoint, thereby attempting to +// update certain settings for a given cluster in Bridge. +func (c *Client) UpdateCluster( + ctx context.Context, apiKey, id string, clusterRequestPayload *PatchClustersRequestPayload, +) (*ClusterApiResource, error) { + result := &ClusterApiResource{} + + clusterbyte, err := json.Marshal(clusterRequestPayload) + if err != nil { + return result, err + } + + response, err := c.doWithRetry(ctx, "PATCH", "/clusters/"+id, nil, clusterbyte, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// GetClusterRole sends a GET request to the "/clusters//roles/" endpoint, thereby retrieving +// Role information for a specific role from a specific cluster in Bridge. +func (c *Client) GetClusterRole(ctx context.Context, apiKey, clusterId, roleName string) (*ClusterRoleApiResource, error) { + result := &ClusterRoleApiResource{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+clusterId+"/roles/"+roleName, nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. 
+ err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// ListClusterRoles sends a GET request to the "/clusters//roles" endpoint thereby retrieving +// a list of all cluster roles for a specific cluster in Bridge. +func (c *Client) ListClusterRoles(ctx context.Context, apiKey, id string) ([]*ClusterRoleApiResource, error) { + result := ClusterRoleList{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+id+"/roles", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result.Roles, err +} diff --git a/internal/bridge/client_test.go b/internal/bridge/client_test.go new file mode 100644 index 0000000000..28728c701c --- /dev/null +++ b/internal/bridge/client_test.go @@ -0,0 +1,1355 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + gocmp "github.com/google/go-cmp/cmp" + gocmpopts "github.com/google/go-cmp/cmp/cmpopts" + "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/crunchydata/postgres-operator/internal/initialize" +) + +var testApiKey = "9012" +var testTeamId = "5678" + +// TestClientBackoff logs the backoff timing chosen by [NewClient] for use +// with `go test -v`. +func TestClientBackoff(t *testing.T) { + client := NewClient("", "") + var total time.Duration + + for i := 1; i <= 50 && client.Backoff.Steps > 0; i++ { + step := client.Backoff.Step() + total += step + + t.Logf("%02d:%20v%20v", i, step, total) + } +} + +func TestClientURL(t *testing.T) { + assert.Equal(t, defaultAPI, NewClient("", "").BaseURL.String(), + "expected the API constant to parse correctly") + + assert.Equal(t, defaultAPI, NewClient("/path", "").BaseURL.String()) + assert.Equal(t, defaultAPI, NewClient("http://:9999", "").BaseURL.String()) + assert.Equal(t, defaultAPI, NewClient("postgres://localhost", "").BaseURL.String()) + assert.Equal(t, defaultAPI, NewClient("postgres://localhost:5432", "").BaseURL.String()) + + assert.Equal(t, + "http://localhost:12345", NewClient("http://localhost:12345", "").BaseURL.String()) +} + +func TestClientDoWithBackoff(t *testing.T) { + t.Run("Arguments", func(t *testing.T) { + var bodies []string + var requests []http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + bodies = append(bodies, string(body)) + requests = append(requests, *r) + + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`some-response`)) + })) + t.Cleanup(server.Close) + + // Client with one attempt, i.e. no backoff. 
+ client := NewClient(server.URL, "xyz") + client.Backoff.Steps = 1 + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + params := url.Values{} + params.Add("foo", "bar") + response, err := client.doWithBackoff(ctx, + "ANY", "/some/path", params, []byte(`the-body`), + http.Header{"Some": []string{"header"}}) + + assert.NilError(t, err) + assert.Assert(t, response != nil) + t.Cleanup(func() { _ = response.Body.Close() }) + + // Arguments became Request fields, including the client version. + assert.Equal(t, len(requests), 1) + assert.Equal(t, bodies[0], "the-body") + assert.Equal(t, requests[0].Method, "ANY") + assert.Equal(t, requests[0].URL.String(), "/some/path?foo=bar") + assert.DeepEqual(t, requests[0].Header.Values("Some"), []string{"header"}) + assert.DeepEqual(t, requests[0].Header.Values("User-Agent"), []string{"PGO/xyz"}) + + body, _ := io.ReadAll(response.Body) + assert.Equal(t, string(body), "some-response") + }) + + t.Run("Idempotency", func(t *testing.T) { + var bodies []string + var requests []http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + bodies = append(bodies, string(body)) + requests = append(requests, *r) + + switch len(requests) { + case 1, 2: + w.WriteHeader(http.StatusBadGateway) + default: + w.WriteHeader(http.StatusNotAcceptable) + } + })) + t.Cleanup(server.Close) + + // Client with brief backoff. + client := NewClient(server.URL, "") + client.Backoff.Duration = time.Millisecond + client.Backoff.Steps = 5 + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + response, err := client.doWithBackoff(ctx, + "POST", "/anything", nil, []byte(`any-body`), + http.Header{"Any": []string{"thing"}}) + + assert.NilError(t, err) + assert.Assert(t, response != nil) + assert.NilError(t, response.Body.Close()) + + assert.Equal(t, len(requests), 3, "expected multiple requests") + + // Headers include an Idempotency-Key. + assert.Equal(t, bodies[0], "any-body") + assert.Equal(t, requests[0].Header.Get("Any"), "thing") + assert.Assert(t, requests[0].Header.Get("Idempotency-Key") != "") + + // Requests are identical, including the Idempotency-Key. + assert.Equal(t, bodies[0], bodies[1]) + assert.DeepEqual(t, requests[0], requests[1], + gocmpopts.IgnoreFields(http.Request{}, "Body"), + gocmpopts.IgnoreUnexported(http.Request{})) + + assert.Equal(t, bodies[1], bodies[2]) + assert.DeepEqual(t, requests[1], requests[2], + gocmpopts.IgnoreFields(http.Request{}, "Body"), + gocmpopts.IgnoreUnexported(http.Request{})) + + // Another, identical request gets a new Idempotency-Key. + response, err = client.doWithBackoff(ctx, + "POST", "/anything", nil, []byte(`any-body`), + http.Header{"Any": []string{"thing"}}) + + assert.NilError(t, err) + assert.Assert(t, response != nil) + assert.NilError(t, response.Body.Close()) + + prior := requests[0].Header.Get("Idempotency-Key") + assert.Assert(t, len(requests) > 3) + assert.Assert(t, requests[3].Header.Get("Idempotency-Key") != "") + assert.Assert(t, requests[3].Header.Get("Idempotency-Key") != prior, + "expected a new idempotency key") + }) + + t.Run("Backoff", func(t *testing.T) { + requests := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests++ + w.WriteHeader(http.StatusInternalServerError) + })) + t.Cleanup(server.Close) + + // Client with brief backoff. 
+ client := NewClient(server.URL, "") + client.Backoff.Duration = time.Millisecond + client.Backoff.Steps = 5 + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + _, err := client.doWithBackoff(ctx, "POST", "/any", nil, nil, nil) //nolint:bodyclose + assert.ErrorContains(t, err, "timed out waiting") + assert.Assert(t, requests > 0, "expected multiple requests") + }) + + t.Run("Cancellation", func(t *testing.T) { + requests := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests++ + w.WriteHeader(http.StatusServiceUnavailable) + })) + t.Cleanup(server.Close) + + // Client with lots of brief backoff. + client := NewClient(server.URL, "") + client.Backoff.Duration = time.Millisecond + client.Backoff.Steps = 100 + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + t.Cleanup(cancel) + + _, err := client.doWithBackoff(ctx, "POST", "/any", nil, nil, nil) //nolint:bodyclose + assert.ErrorIs(t, err, context.DeadlineExceeded) + assert.Assert(t, requests > 0, "expected multiple requests") + }) +} + +func TestClientDoWithRetry(t *testing.T) { + t.Run("Arguments", func(t *testing.T) { + var bodies []string + var requests []http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + bodies = append(bodies, string(body)) + requests = append(requests, *r) + + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`some-response`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "xyz") + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + params := url.Values{} + params.Add("foo", "bar") + response, err := client.doWithRetry(ctx, + "ANY", "/some/path", params, []byte(`the-body`), + http.Header{"Some": []string{"header"}}) + + assert.NilError(t, err) + assert.Assert(t, response != nil) + t.Cleanup(func() { _ = response.Body.Close() }) + + // Arguments became Request fields, including the client version. 
+ assert.Equal(t, len(requests), 1) + assert.Equal(t, bodies[0], "the-body") + assert.Equal(t, requests[0].Method, "ANY") + assert.Equal(t, requests[0].URL.String(), "/some/path?foo=bar") + assert.DeepEqual(t, requests[0].Header.Values("Some"), []string{"header"}) + assert.DeepEqual(t, requests[0].Header.Values("User-Agent"), []string{"PGO/xyz"}) + + body, _ := io.ReadAll(response.Body) + assert.Equal(t, string(body), "some-response") + }) + + t.Run("Throttling", func(t *testing.T) { + var bodies []string + var requests []http.Request + var times []time.Time + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + bodies = append(bodies, string(body)) + requests = append(requests, *r) + times = append(times, time.Now()) + + switch len(requests) { + case 1: + w.Header().Set("Retry-After", "1") + w.WriteHeader(http.StatusTooManyRequests) + default: + w.WriteHeader(http.StatusOK) + } + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + response, err := client.doWithRetry(ctx, + "POST", "/anything", nil, []byte(`any-body`), + http.Header{"Any": []string{"thing"}}) + + assert.NilError(t, err) + assert.Assert(t, response != nil) + assert.NilError(t, response.Body.Close()) + + assert.Equal(t, len(requests), 2, "expected multiple requests") + + // Headers include an Idempotency-Key. + assert.Equal(t, bodies[0], "any-body") + assert.Equal(t, requests[0].Header.Get("Any"), "thing") + assert.Assert(t, requests[0].Header.Get("Idempotency-Key") != "") + + // Requests are identical, except for the Idempotency-Key. + assert.Equal(t, bodies[0], bodies[1]) + assert.DeepEqual(t, requests[0], requests[1], + gocmpopts.IgnoreFields(http.Request{}, "Body"), + gocmpopts.IgnoreUnexported(http.Request{}), + gocmp.FilterPath( + func(p gocmp.Path) bool { return p.String() == "Header" }, + gocmpopts.IgnoreMapEntries( + func(k string, v []string) bool { return k == "Idempotency-Key" }, + ), + ), + ) + + prior := requests[0].Header.Get("Idempotency-Key") + assert.Assert(t, requests[1].Header.Get("Idempotency-Key") != "") + assert.Assert(t, requests[1].Header.Get("Idempotency-Key") != prior, + "expected a new idempotency key") + + // Requests are delayed according the server's response. + // TODO: Mock the clock for faster tests. 
+ assert.Assert(t, times[0].Add(time.Second).Before(times[1]), + "expected the second request over 1sec after the first") + }) + + t.Run("Cancellation", func(t *testing.T) { + requests := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests++ + w.Header().Set("Retry-After", "5") + w.WriteHeader(http.StatusTooManyRequests) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + t.Cleanup(cancel) + + start := time.Now() + _, err := client.doWithRetry(ctx, "POST", "/any", nil, nil, nil) //nolint:bodyclose + assert.ErrorIs(t, err, context.DeadlineExceeded) + assert.Assert(t, time.Since(start) < time.Second) + assert.Equal(t, requests, 1, "expected one request") + }) + + t.Run("UnexpectedResponse", func(t *testing.T) { + for _, tt := range []struct { + Name string + Send func(http.ResponseWriter) + Expect func(testing.TB, http.Response) + }{ + { + Name: "NoHeader", + Send: func(w http.ResponseWriter) { + w.WriteHeader(http.StatusTooManyRequests) + }, + Expect: func(t testing.TB, r http.Response) { + t.Helper() + assert.Equal(t, r.StatusCode, http.StatusTooManyRequests) + }, + }, + { + Name: "ZeroHeader", + Send: func(w http.ResponseWriter) { + w.Header().Set("Retry-After", "0") + w.WriteHeader(http.StatusTooManyRequests) + }, + Expect: func(t testing.TB, r http.Response) { + t.Helper() + assert.Equal(t, r.Header.Get("Retry-After"), "0") + assert.Equal(t, r.StatusCode, http.StatusTooManyRequests) + }, + }, + { + Name: "NegativeHeader", + Send: func(w http.ResponseWriter) { + w.Header().Set("Retry-After", "-10") + w.WriteHeader(http.StatusTooManyRequests) + }, + Expect: func(t testing.TB, r http.Response) { + t.Helper() + assert.Equal(t, r.Header.Get("Retry-After"), "-10") + assert.Equal(t, r.StatusCode, http.StatusTooManyRequests) + }, + }, + { + Name: "TextHeader", + Send: func(w http.ResponseWriter) { + w.Header().Set("Retry-After", "bogus") + w.WriteHeader(http.StatusTooManyRequests) + }, + Expect: func(t testing.TB, r http.Response) { + t.Helper() + assert.Equal(t, r.Header.Get("Retry-After"), "bogus") + assert.Equal(t, r.StatusCode, http.StatusTooManyRequests) + }, + }, + } { + t.Run(tt.Name, func(t *testing.T) { + requests := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests++ + tt.Send(w) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + response, err := client.doWithRetry(ctx, "POST", "/any", nil, nil, nil) + assert.NilError(t, err) + assert.Assert(t, response != nil) + t.Cleanup(func() { _ = response.Body.Close() }) + + tt.Expect(t, *response) + + assert.Equal(t, requests, 1, "expected no retries") + }) + } + }) +} + +func TestClientCreateAuthObject(t *testing.T) { + t.Run("Arguments", func(t *testing.T) { + var requests []http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + assert.Equal(t, len(body), 0) + requests = append(requests, *r) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + _, _ = client.CreateAuthObject(ctx, AuthObject{Secret: "sesame"}) + + assert.Equal(t, len(requests), 1) + assert.Equal(t, 
requests[0].Header.Get("Authorization"), "Bearer sesame") + }) + + t.Run("Unauthorized", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + _, _ = w.Write([]byte(`some info`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateAuthObject(context.Background(), AuthObject{}) + assert.ErrorContains(t, err, "authentication") + assert.ErrorContains(t, err, "some info") + assert.ErrorIs(t, err, errAuthentication) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte(`some message`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateAuthObject(context.Background(), AuthObject{}) + assert.ErrorContains(t, err, "404 Not Found") + assert.ErrorContains(t, err, "some message") + }) + + t.Run("NoResponseBody", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateAuthObject(context.Background(), AuthObject{}) + assert.ErrorContains(t, err, "unexpected end") + assert.ErrorContains(t, err, "JSON") + }) + + t.Run("ResponseNotJSON", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`asdf`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateAuthObject(context.Background(), AuthObject{}) + assert.ErrorContains(t, err, "invalid") + assert.ErrorContains(t, err, "asdf") + }) +} + +func TestClientCreateInstallation(t *testing.T) { + t.Run("ErrorResponse", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte(`any content, any format`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateInstallation(context.Background()) + assert.ErrorContains(t, err, "404 Not Found") + assert.ErrorContains(t, err, "any content, any format") + }) + + t.Run("NoResponseBody", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateInstallation(context.Background()) + assert.ErrorContains(t, err, "unexpected end") + assert.ErrorContains(t, err, "JSON") + }) + + t.Run("ResponseNotJSON", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`asdf`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateInstallation(context.Background()) 
+ assert.ErrorContains(t, err, "invalid") + assert.ErrorContains(t, err, "asdf") + }) +} + +func TestListClusters(t *testing.T) { + responsePayload := &ClusterList{ + Clusters: []*ClusterApiResource{}, + } + firstClusterApiResource := &ClusterApiResource{ + ID: "1234", + } + secondClusterApiResource := &ClusterApiResource{ + ID: "2345", + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "GET", "Expected GET method") + assert.Equal(t, r.URL.Path, "/clusters", "Expected path to be '/clusters'") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + assert.Equal(t, r.URL.Query()["team_id"][0], testTeamId, "Expected query params to contain team id.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.NilError(t, err) + }) + + t.Run("OkResponseNoClusters", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusters, err := client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.NilError(t, err) + assert.Equal(t, len(clusters), 0) + }) + + t.Run("OkResponseOneCluster", func(t *testing.T) { + responsePayload.Clusters = append(responsePayload.Clusters, firstClusterApiResource) + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusters, err := client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.NilError(t, err) + assert.Equal(t, len(clusters), 1) + assert.Equal(t, clusters[0].ID, responsePayload.Clusters[0].ID) + }) + + t.Run("OkResponseTwoClusters", func(t *testing.T) { + responsePayload.Clusters = append(responsePayload.Clusters, secondClusterApiResource) + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusters, err := client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.NilError(t, err) + assert.Equal(t, len(clusters), 2) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = 
w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestCreateCluster(t *testing.T) { + clusterApiResource := &ClusterApiResource{ + ClusterName: "test-cluster1", + } + clusterRequestPayload := &PostClustersRequestPayload{ + Name: "test-cluster1", + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var receivedPayload PostClustersRequestPayload + dec := json.NewDecoder(r.Body) + err = dec.Decode(&receivedPayload) + assert.NilError(t, err) + assert.Equal(t, r.Method, "POST", "Expected POST method") + assert.Equal(t, r.URL.Path, "/clusters", "Expected path to be '/clusters'") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + assert.Equal(t, receivedPayload, *clusterRequestPayload) + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.CreateCluster(context.Background(), testApiKey, clusterRequestPayload) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + newCluster, err := client.CreateCluster(context.Background(), testApiKey, clusterRequestPayload) + assert.NilError(t, err) + assert.Equal(t, newCluster.ClusterName, clusterApiResource.ClusterName) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.CreateCluster(context.Background(), testApiKey, clusterRequestPayload) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestDeleteCluster(t *testing.T) { + clusterId := "1234" + clusterApiResource := &ClusterApiResource{ + ClusterName: "test-cluster1", + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "DELETE", "Expected DELETE method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId, "Expected path to be /clusters/"+clusterId) + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + 
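Aside, not part of the patch: each of these subtests repeats the same scaffold of httptest server, cleanup, and client construction. A shared helper along the following lines could factor that out; the name newTestClient and its parameters are invented for illustration.

func newTestClient(t *testing.T, status int, body []byte) *Client {
	t.Helper()
	// Serve a fixed status code and body for every request.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(status)
		_, _ = w.Write(body)
	}))
	t.Cleanup(server.Close)

	client := NewClient(server.URL, "")
	assert.Equal(t, client.BaseURL.String(), server.URL)
	return client
}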
t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, _, err = client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + deletedCluster, deletedAlready, err := client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, deletedCluster.ClusterName, clusterApiResource.ClusterName) + assert.Equal(t, deletedAlready, false) + }) + + t.Run("GoneResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusGone) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, deletedAlready, err := client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, deletedAlready, true) + }) + + t.Run("NotFoundResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, deletedAlready, err := client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, deletedAlready, true) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, _, err = client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestGetCluster(t *testing.T) { + clusterId := "1234" + clusterApiResource := &ClusterApiResource{ + ClusterName: "test-cluster1", + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "GET", "Expected GET method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId, "Expected path to be /clusters/"+clusterId) + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, 
client.BaseURL.String(), server.URL) + + _, err = client.GetCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + cluster, err := client.GetCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, cluster.ClusterName, clusterApiResource.ClusterName) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetCluster(context.Background(), testApiKey, clusterId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestGetClusterStatus(t *testing.T) { + clusterId := "1234" + state := "Ready" + + clusterStatusApiResource := &ClusterStatusApiResource{ + State: state, + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterStatusApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "GET", "Expected GET method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/status", "Expected path to be /clusters/"+clusterId+"/status") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetClusterStatus(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterStatusApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterStatus, err := client.GetClusterStatus(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, clusterStatus.State, state) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterStatusApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetClusterStatus(context.Background(), testApiKey, clusterId) + assert.Check(t, err != nil) + 
assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestGetClusterUpgrade(t *testing.T) { + clusterId := "1234" + clusterUpgradeApiResource := &ClusterUpgradeApiResource{ + ClusterID: clusterId, + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "GET", "Expected GET method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/upgrade", "Expected path to be /clusters/"+clusterId+"/upgrade") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetClusterUpgrade(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterUpgrade, err := client.GetClusterUpgrade(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, clusterUpgrade.ClusterID, clusterId) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetClusterUpgrade(context.Background(), testApiKey, clusterId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestUpgradeCluster(t *testing.T) { + clusterId := "1234" + clusterUpgradeApiResource := &ClusterUpgradeApiResource{ + ClusterID: clusterId, + } + clusterUpgradeRequestPayload := &PostClustersUpgradeRequestPayload{ + Plan: "standard-8", + PostgresVersion: intstr.FromInt(15), + UpgradeStartTime: "start-time", + Storage: 10, + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var receivedPayload PostClustersUpgradeRequestPayload + dec := json.NewDecoder(r.Body) + err = dec.Decode(&receivedPayload) + assert.NilError(t, err) + assert.Equal(t, r.Method, "POST", "Expected POST method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/upgrade", "Expected path to be /clusters/"+clusterId+"/upgrade") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + assert.Equal(t, receivedPayload, *clusterUpgradeRequestPayload) + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + 
assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.UpgradeCluster(context.Background(), testApiKey, clusterId, clusterUpgradeRequestPayload) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterUpgrade, err := client.UpgradeCluster(context.Background(), testApiKey, clusterId, clusterUpgradeRequestPayload) + assert.NilError(t, err) + assert.Equal(t, clusterUpgrade.ClusterID, clusterId) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.UpgradeCluster(context.Background(), testApiKey, clusterId, clusterUpgradeRequestPayload) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestUpgradeClusterHA(t *testing.T) { + clusterId := "1234" + action := "enable-ha" + clusterUpgradeApiResource := &ClusterUpgradeApiResource{ + ClusterID: clusterId, + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "PUT", "Expected PUT method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/actions/"+action, + "Expected path to be /clusters/"+clusterId+"/actions/"+action) + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.UpgradeClusterHA(context.Background(), testApiKey, clusterId, action) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterUpgrade, err := client.UpgradeClusterHA(context.Background(), testApiKey, clusterId, action) + assert.NilError(t, err) + assert.Equal(t, clusterUpgrade.ClusterID, clusterId) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, 
"") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.UpgradeClusterHA(context.Background(), testApiKey, clusterId, action) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestUpdateCluster(t *testing.T) { + clusterId := "1234" + clusterApiResource := &ClusterApiResource{ + ClusterName: "new-cluster-name", + } + clusterUpdateRequestPayload := &PatchClustersRequestPayload{ + IsProtected: initialize.Bool(true), + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var receivedPayload PatchClustersRequestPayload + dec := json.NewDecoder(r.Body) + err = dec.Decode(&receivedPayload) + assert.NilError(t, err) + assert.Equal(t, r.Method, "PATCH", "Expected PATCH method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId, "Expected path to be /clusters/"+clusterId) + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + assert.Equal(t, *receivedPayload.IsProtected, *clusterUpdateRequestPayload.IsProtected) + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.UpdateCluster(context.Background(), testApiKey, clusterId, clusterUpdateRequestPayload) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterUpdate, err := client.UpdateCluster(context.Background(), testApiKey, clusterId, clusterUpdateRequestPayload) + assert.NilError(t, err) + assert.Equal(t, clusterUpdate.ClusterName, clusterApiResource.ClusterName) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.UpdateCluster(context.Background(), testApiKey, clusterId, clusterUpdateRequestPayload) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestGetClusterRole(t *testing.T) { + clusterId := "1234" + roleName := "application" + clusterRoleApiResource := &ClusterRoleApiResource{ + Name: roleName, + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterRoleApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "GET", "Expected GET method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/roles/"+roleName, + "Expected path to be /clusters/"+clusterId+"/roles/"+roleName) + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, 
"Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetClusterRole(context.Background(), testApiKey, clusterId, roleName) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterRoleApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterRole, err := client.GetClusterRole(context.Background(), testApiKey, clusterId, roleName) + assert.NilError(t, err) + assert.Equal(t, clusterRole.Name, roleName) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterRoleApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetClusterRole(context.Background(), testApiKey, clusterId, roleName) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestListClusterRoles(t *testing.T) { + clusterId := "1234" + responsePayload := &ClusterRoleList{ + Roles: []*ClusterRoleApiResource{}, + } + applicationClusterRoleApiResource := &ClusterRoleApiResource{} + postgresClusterRoleApiResource := &ClusterRoleApiResource{} + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "GET", "Expected GET method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/roles", "Expected path to be '/clusters/%s/roles'") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.ListClusterRoles(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayload.Roles = append(responsePayload.Roles, applicationClusterRoleApiResource, postgresClusterRoleApiResource) + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterRoles, err := client.ListClusterRoles(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, len(clusterRoles), 2) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, 
err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.ListClusterRoles(context.Background(), testApiKey, clusterId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} diff --git a/internal/bridge/crunchybridgecluster/apply.go b/internal/bridge/crunchybridgecluster/apply.go new file mode 100644 index 0000000000..d77d719d6a --- /dev/null +++ b/internal/bridge/crunchybridgecluster/apply.go @@ -0,0 +1,47 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "reflect" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// patch sends patch to object's endpoint in the Kubernetes API and updates +// object with any returned content. The fieldManager is set to r.Owner, but +// can be overridden in options. +// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers +// +// NOTE: This function is duplicated from a version in the postgrescluster package +func (r *CrunchyBridgeClusterReconciler) patch( + ctx context.Context, object client.Object, + patch client.Patch, options ...client.PatchOption, +) error { + options = append([]client.PatchOption{r.Owner}, options...) + return r.Client.Patch(ctx, object, patch, options...) +} + +// apply sends an apply patch to object's endpoint in the Kubernetes API and +// updates object with any returned content. The fieldManager is set to +// r.Owner and the force parameter is true. +// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers +// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts +// +// NOTE: This function is duplicated from a version in the postgrescluster package +func (r *CrunchyBridgeClusterReconciler) apply(ctx context.Context, object client.Object) error { + // Generate an apply-patch by comparing the object to its zero value. + zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() + data, err := client.MergeFrom(zero.(client.Object)).Data(object) + apply := client.RawPatch(client.Apply.Type(), data) + + // Send the apply-patch with force=true. + if err == nil { + err = r.patch(ctx, object, apply, client.ForceOwnership) + } + + return err +} diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go new file mode 100644 index 0000000000..03d67442be --- /dev/null +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go @@ -0,0 +1,701 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + pgoRuntime "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// CrunchyBridgeClusterReconciler reconciles a CrunchyBridgeCluster object +type CrunchyBridgeClusterReconciler struct { + client.Client + + Owner client.FieldOwner + + // For this iteration, we will only be setting conditions rather than + // setting conditions and emitting events. That may change in the future, + // so we're leaving this EventRecorder here for now. + // record.EventRecorder + + // NewClient is called each time a new Client is needed. + NewClient func() bridge.ClientInterface +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={list,watch} +//+kubebuilder:rbac:groups="",resources="secrets",verbs={list,watch} + +// SetupWithManager sets up the controller with the Manager. +func (r *CrunchyBridgeClusterReconciler) SetupWithManager( + mgr ctrl.Manager, +) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1beta1.CrunchyBridgeCluster{}). + Owns(&corev1.Secret{}). + // Wake periodically to check Bridge API for all CrunchyBridgeClusters. + // Potentially replace with different requeue times, remove the Watch function + // Smarter: retry after a certain time for each cluster: https://gist.github.com/cbandy/a5a604e3026630c5b08cfbcdfffd2a13 + WatchesRawSource( + pgoRuntime.NewTickerImmediate(5*time.Minute, event.GenericEvent{}, r.Watch()), + ). + // Watch secrets and filter for secrets mentioned by CrunchyBridgeClusters + Watches( + &corev1.Secret{}, + r.watchForRelatedSecret(), + ). + Complete(r) +} + +// The owner reference created by controllerutil.SetControllerReference blocks +// deletion. The OwnerReferencesPermissionEnforcement plugin requires that the +// creator of such a reference have either "delete" permission on the owner or +// "update" permission on the owner's "finalizers" subresource. +// - https://docs.k8s.io/reference/access-authn-authz/admission-controllers/ +// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/finalizers",verbs={update} + +// setControllerReference sets owner as a Controller OwnerReference on controlled. +// Only one OwnerReference can be a controller, so it returns an error if another +// is already set. 
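An illustrative aside, not part of the patch: the setControllerReference helper below is typically paired with the server-side apply helper from apply.go when the reconciler writes an object it owns, for example a Secret. The applyOwnedSecret name and its arguments are invented for this sketch.

func (r *CrunchyBridgeClusterReconciler) applyOwnedSecret(
	ctx context.Context, owner *v1beta1.CrunchyBridgeCluster, name string, data map[string][]byte,
) error {
	secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{
		Namespace: owner.Namespace, Name: name,
	}}
	secret.Data = data
	// The apply patch must carry apiVersion and kind, so set the GVK explicitly.
	secret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret"))

	// Make the cluster the controlling owner so the Secret is garbage
	// collected when the cluster is deleted.
	if err := r.setControllerReference(owner, secret); err != nil {
		return err
	}
	// Server-side apply with force=true and r.Owner as the field manager (see apply.go).
	return r.apply(ctx, secret)
}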
+func (r *CrunchyBridgeClusterReconciler) setControllerReference( + owner *v1beta1.CrunchyBridgeCluster, controlled client.Object, +) error { + return controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme()) +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={get,patch,update} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/status",verbs={patch,update} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/finalizers",verbs={patch,update} +//+kubebuilder:rbac:groups="",resources="secrets",verbs={get} + +// Reconcile does the work to move the current state of the world toward the +// desired state described in a [v1beta1.CrunchyBridgeCluster] identified by req. +func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + + // Retrieve the crunchybridgecluster from the client cache, if it exists. A deferred + // function below will send any changes to its Status field. + // + // NOTE: No DeepCopy is necessary here because controller-runtime makes a + // copy before returning from its cache. + // - https://github.com/kubernetes-sigs/controller-runtime/issues/1235 + crunchybridgecluster := &v1beta1.CrunchyBridgeCluster{} + err := r.Get(ctx, req.NamespacedName, crunchybridgecluster) + + if err == nil { + // Write any changes to the crunchybridgecluster status on the way out. + before := crunchybridgecluster.DeepCopy() + defer func() { + if !equality.Semantic.DeepEqual(before.Status, crunchybridgecluster.Status) { + status := r.Status().Patch(ctx, crunchybridgecluster, client.MergeFrom(before), r.Owner) + + if err == nil && status != nil { + err = status + } else if status != nil { + log.Error(status, "Patching CrunchyBridgeCluster status") + } + } + }() + } else { + // NotFound cannot be fixed by requeuing so ignore it. During background + // deletion, we receive delete events from crunchybridgecluster's dependents after + // crunchybridgecluster is deleted. + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Get and validate connection secret for requests + key, team, err := r.reconcileBridgeConnectionSecret(ctx, crunchybridgecluster) + if err != nil { + log.Error(err, "issue reconciling bridge connection secret") + + // Don't automatically requeue Secret issues. We are watching for + // related secrets, so will requeue when a related secret is touched. + // lint:ignore nilerr Return err as status, no requeue needed + return ctrl.Result{}, nil + } + + // Check for and handle deletion of cluster. Return early if it is being + // deleted or there was an error. Make sure finalizer is added if cluster + // is not being deleted. 
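An aside, not from the patch itself: the related-secret watch mentioned above is wired up in SetupWithManager through watchForRelatedSecret, whose definition sits outside this hunk. Assuming it follows the usual controller-runtime pattern, its shape would be roughly the following (this sketch also needs the handler and reconcile packages from sigs.k8s.io/controller-runtime):

func (r *CrunchyBridgeClusterReconciler) watchForRelatedSecretSketch() handler.EventHandler {
	return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []reconcile.Request {
		// Requeue every CrunchyBridgeCluster in this namespace that names the Secret.
		var requests []reconcile.Request
		clusters := &v1beta1.CrunchyBridgeClusterList{}
		if err := r.List(ctx, clusters, client.InNamespace(secret.GetNamespace())); err == nil {
			for i := range clusters.Items {
				if clusters.Items[i].Spec.Secret == secret.GetName() {
					requests = append(requests, reconcile.Request{
						NamespacedName: client.ObjectKeyFromObject(&clusters.Items[i]),
					})
				}
			}
		}
		return requests
	})
}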
+ if result, err := r.handleDelete(ctx, crunchybridgecluster, key); err != nil { + log.Error(err, "deleting") + return ctrl.Result{}, err + } else if result != nil { + if log := log.V(1); log.Enabled() { + log.Info("deleting", "result", fmt.Sprintf("%+v", *result)) + } + return *result, err + } + + // Wonder if there's a better way to handle adding/checking/removing statuses + // We did something in the upgrade controller + // Exit early if we can't create from this K8s object + // unless this K8s object has been changed (compare ObservedGeneration) + invalid := meta.FindStatusCondition(crunchybridgecluster.Status.Conditions, + v1beta1.ConditionReady) + if invalid != nil && + invalid.Status == metav1.ConditionFalse && + invalid.Reason == "ClusterInvalid" && + invalid.ObservedGeneration == crunchybridgecluster.GetGeneration() { + return ctrl.Result{}, nil + } + + // check for an upgrade error and return until observedGeneration has + // been incremented. + invalidUpgrade := meta.FindStatusCondition(crunchybridgecluster.Status.Conditions, + v1beta1.ConditionUpgrading) + if invalidUpgrade != nil && + invalidUpgrade.Status == metav1.ConditionFalse && + invalidUpgrade.Reason == "UpgradeError" && + invalidUpgrade.ObservedGeneration == crunchybridgecluster.GetGeneration() { + return ctrl.Result{}, nil + } + + // We should only be missing the ID if no create has been issued + // or the create was interrupted and we haven't received the ID. + if crunchybridgecluster.Status.ID == "" { + // Check if a cluster with the same name already exists + controllerResult, err := r.handleDuplicateClusterName(ctx, key, team, crunchybridgecluster) + if err != nil || controllerResult != nil { + return *controllerResult, err + } + + // if we've gotten here then no cluster exists with that name and we're missing the ID, ergo, create cluster + return r.handleCreateCluster(ctx, key, team, crunchybridgecluster), nil + } + + // If we reach this point, our CrunchyBridgeCluster object has an ID, so we want + // to fill in the details for the cluster, cluster status, and cluster upgrades + // from the Bridge API. + + // Get Cluster + err = r.handleGetCluster(ctx, key, crunchybridgecluster) + if err != nil { + return ctrl.Result{}, err + } + + // Get Cluster Status + err = r.handleGetClusterStatus(ctx, key, crunchybridgecluster) + if err != nil { + return ctrl.Result{}, err + } + + // Get Cluster Upgrade + err = r.handleGetClusterUpgrade(ctx, key, crunchybridgecluster) + if err != nil { + return ctrl.Result{}, err + } + + // Reconcile roles and their secrets + err = r.reconcilePostgresRoles(ctx, key, crunchybridgecluster) + if err != nil { + log.Error(err, "issue reconciling postgres user roles/secrets") + return ctrl.Result{}, err + } + + // For now, we skip updating until the upgrade status is cleared. + // For the future, we may want to update in-progress upgrades, + // and for that we will need a way tell that an upgrade in progress + // is the one we want to update. + // Consider: Perhaps add `generation` field to upgrade status? + // Checking this here also means that if an upgrade is requested through the GUI/API + // then we will requeue and wait for it to be done. + // TODO(crunchybridgecluster): Do we want the operator to interrupt + // upgrades created through the GUI/API? 
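Aside, an assumption rather than code from the patch: runtime.RequeueWithoutBackoff is defined in internal/controller/runtime and is not shown in this hunk. Judging by its name and the call sites below, it presumably reduces to a plain delayed requeue, roughly:

// Requeue after the given delay, skipping the default exponential backoff.
func RequeueWithoutBackoff(after time.Duration) ctrl.Result {
	return ctrl.Result{RequeueAfter: after}
}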
+ if len(crunchybridgecluster.Status.OngoingUpgrade) != 0 { + return runtime.RequeueWithoutBackoff(3 * time.Minute), nil + } + + // Check if there's an upgrade difference for the three upgradeable fields that hit the upgrade endpoint + // Why PostgresVersion and MajorVersion? Because MajorVersion in the Status is sure to be + // an int of the major version, whereas Status.Responses.Cluster.PostgresVersion might be the ID + if (crunchybridgecluster.Spec.Storage != *crunchybridgecluster.Status.Storage) || + crunchybridgecluster.Spec.Plan != crunchybridgecluster.Status.Plan || + crunchybridgecluster.Spec.PostgresVersion != crunchybridgecluster.Status.MajorVersion { + return r.handleUpgrade(ctx, key, crunchybridgecluster), nil + } + + // Are there diffs between the cluster response from the Bridge API and the spec? + // HA diffs are sent to /clusters/{cluster_id}/actions/[enable|disable]-ha + // so have to know (a) to send and (b) which to send to + if crunchybridgecluster.Spec.IsHA != *crunchybridgecluster.Status.IsHA { + return r.handleUpgradeHA(ctx, key, crunchybridgecluster), nil + } + + // Check if there's a difference in is_protected, name, maintenance_window_start, etc. + // see https://docs.crunchybridge.com/api/cluster#update-cluster + // updates to these fields that hit the PATCH `clusters/` endpoint + if crunchybridgecluster.Spec.IsProtected != *crunchybridgecluster.Status.IsProtected || + crunchybridgecluster.Spec.ClusterName != crunchybridgecluster.Status.ClusterName { + return r.handleUpdate(ctx, key, crunchybridgecluster), nil + } + + log.Info("Reconciled") + // TODO(crunchybridgecluster): do we always want to requeue? Does the Watch mean we + // don't need this, or do we want both? + return runtime.RequeueWithoutBackoff(3 * time.Minute), nil +} + +// reconcileBridgeConnectionSecret looks for the Bridge connection secret specified by the cluster, +// and returns the API key and Team ID found in the secret, or sets conditions and returns an error +// if the secret is invalid. +func (r *CrunchyBridgeClusterReconciler) reconcileBridgeConnectionSecret( + ctx context.Context, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) (string, string, error) { + key, team, err := r.GetSecretKeys(ctx, crunchybridgecluster) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "SecretInvalid", + Message: fmt.Sprintf( + "The condition of the cluster is unknown because the secret is invalid: %v", err), + }) + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionUnknown, + ObservedGeneration: crunchybridgecluster.GetGeneration(), + LastTransitionTime: metav1.Time{}, + Reason: "SecretInvalid", + Message: fmt.Sprintf( + "The condition of the upgrade(s) is unknown because the secret is invalid: %v", err), + }) + + return "", "", err + } + + return key, team, err +} + +// handleDuplicateClusterName checks Bridge for any already existing clusters that +// have the same name. It returns (nil, nil) when no cluster is found with the same +// name. It returns a controller result, indicating we should exit the reconcile loop, +// if a cluster with a duplicate name is found. The caller is responsible for +// returning controller result objects and errors to controller-runtime. 
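An aside, not part of the patch: from the user's side, the adoption path described above comes down to annotating the CrunchyBridgeCluster resource with the existing Bridge cluster's ID. The namespace, name, and ID below are placeholders; only the annotation key comes from this patch.

adoptee := &v1beta1.CrunchyBridgeCluster{}
adoptee.Namespace, adoptee.Name = "postgres-operator", "hippo"
adoptee.Annotations = map[string]string{
	// Resolves to "postgres-operator.crunchydata.com/adopt-bridge-cluster".
	naming.CrunchyBridgeClusterAdoptionAnnotation: "existing-bridge-cluster-id",
}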
+func (r *CrunchyBridgeClusterReconciler) handleDuplicateClusterName(ctx context.Context, + apiKey, teamId string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) (*ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + + clusters, err := r.NewClient().ListClusters(ctx, apiKey, teamId) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "UnknownClusterState", + Message: fmt.Sprintf("Issue listing existing clusters in Bridge: %v", err), + }) + log.Error(err, "issue listing existing clusters in Bridge") + return &ctrl.Result{}, err + } + + for _, cluster := range clusters { + if crunchybridgecluster.Spec.ClusterName == cluster.ClusterName { + // Cluster with the same name exists so check for adoption annotation + adoptionID, annotationExists := crunchybridgecluster.Annotations[naming.CrunchyBridgeClusterAdoptionAnnotation] + if annotationExists && strings.EqualFold(adoptionID, cluster.ID) { + // Annotation is present with correct ID value; adopt cluster by assigning ID to status. + crunchybridgecluster.Status.ID = cluster.ID + // Requeue now that we have a cluster ID assigned + return &ctrl.Result{Requeue: true}, nil + } + + // If we made it here, the adoption annotation either doesn't exist or its value is incorrect. + // The user must either add it or change the name on the CR. + + // Set invalid status condition and create log message. + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionFalse, + Reason: "DuplicateClusterName", + Message: fmt.Sprintf("A cluster with the same name already exists for this team (Team ID: %v). "+ + "Give the CrunchyBridgeCluster CR a unique name, or if you would like to take control of the "+ + "existing cluster, add the 'postgres-operator.crunchydata.com/adopt-bridge-cluster' "+ + "annotation and set its value to the existing cluster's ID (Cluster ID: %v).", teamId, cluster.ID), + }) + + log.Info(fmt.Sprintf("A cluster with the same name already exists for this team (Team ID: %v). 
"+ + "Give the CrunchyBridgeCluster CR a unique name, or if you would like to take control "+ + "of the existing cluster, add the 'postgres-operator.crunchydata.com/adopt-bridge-cluster' "+ + "annotation and set its value to the existing cluster's ID (Cluster ID: %v).", teamId, cluster.ID)) + + // We have an invalid cluster spec so we don't want to requeue + return &ctrl.Result{}, nil + } + } + + return nil, nil +} + +// handleCreateCluster handles creating new Crunchy Bridge Clusters +func (r *CrunchyBridgeClusterReconciler) handleCreateCluster(ctx context.Context, + apiKey, teamId string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) ctrl.Result { + log := ctrl.LoggerFrom(ctx) + + createClusterRequestPayload := &bridge.PostClustersRequestPayload{ + IsHA: crunchybridgecluster.Spec.IsHA, + Name: crunchybridgecluster.Spec.ClusterName, + Plan: crunchybridgecluster.Spec.Plan, + PostgresVersion: intstr.FromInt(crunchybridgecluster.Spec.PostgresVersion), + Provider: crunchybridgecluster.Spec.Provider, + Region: crunchybridgecluster.Spec.Region, + Storage: bridge.ToGibibytes(crunchybridgecluster.Spec.Storage), + Team: teamId, + } + cluster, err := r.NewClient().CreateCluster(ctx, apiKey, createClusterRequestPayload) + if err != nil { + log.Error(err, "issue creating cluster in Bridge") + // TODO(crunchybridgecluster): probably shouldn't set this condition unless response from Bridge + // indicates the payload is wrong + // Otherwise want a different condition + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionFalse, + Reason: "ClusterInvalid", + Message: fmt.Sprintf( + "Cannot create from spec: %v", err), + }) + + // TODO(crunchybridgecluster): If the payload is wrong, we don't want to requeue, so pass nil error + // If the transmission hit a transient problem, we do want to requeue + return ctrl.Result{} + } + crunchybridgecluster.Status.ID = cluster.ID + + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "UnknownClusterState", + Message: "The condition of the cluster is unknown.", + }) + + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionUnknown, + Reason: "UnknownUpgradeState", + Message: "The condition of the upgrade(s) is unknown.", + }) + + return runtime.RequeueWithoutBackoff(3 * time.Minute) +} + +// handleGetCluster handles getting the cluster details from Bridge and +// updating the cluster CR's Status accordingly +func (r *CrunchyBridgeClusterReconciler) handleGetCluster(ctx context.Context, + apiKey string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) error { + log := ctrl.LoggerFrom(ctx) + + clusterDetails, err := r.NewClient().GetCluster(ctx, apiKey, crunchybridgecluster.Status.ID) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "UnknownClusterState", + Message: fmt.Sprintf("Issue getting cluster information from Bridge: %v", err), + }) + log.Error(err, "issue getting cluster information from Bridge") + 
return err + } + clusterDetails.AddDataToClusterStatus(crunchybridgecluster) + + return nil +} + +// handleGetClusterStatus handles getting the cluster status from Bridge and +// updating the cluster CR's Status accordingly +func (r *CrunchyBridgeClusterReconciler) handleGetClusterStatus(ctx context.Context, + apiKey string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) error { + log := ctrl.LoggerFrom(ctx) + + clusterStatus, err := r.NewClient().GetClusterStatus(ctx, apiKey, crunchybridgecluster.Status.ID) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "UnknownClusterState", + Message: fmt.Sprintf("Issue getting cluster status from Bridge: %v", err), + }) + crunchybridgecluster.Status.State = "unknown" + log.Error(err, "issue getting cluster status from Bridge") + return err + } + clusterStatus.AddDataToClusterStatus(crunchybridgecluster) + + if clusterStatus.State == "ready" { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionTrue, + Reason: clusterStatus.State, + Message: fmt.Sprintf("Bridge cluster state is %v.", clusterStatus.State), + }) + } else { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionFalse, + Reason: clusterStatus.State, + Message: fmt.Sprintf("Bridge cluster state is %v.", clusterStatus.State), + }) + } + + return nil +} + +// handleGetClusterUpgrade handles getting the ongoing upgrade operations from Bridge and +// updating the cluster CR's Status accordingly +func (r *CrunchyBridgeClusterReconciler) handleGetClusterUpgrade(ctx context.Context, + apiKey string, + crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) error { + log := ctrl.LoggerFrom(ctx) + + clusterUpgradeDetails, err := r.NewClient().GetClusterUpgrade(ctx, apiKey, crunchybridgecluster.Status.ID) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionUnknown, + Reason: "UnknownUpgradeState", + Message: fmt.Sprintf("Issue getting cluster upgrade from Bridge: %v", err), + }) + log.Error(err, "issue getting cluster upgrade from Bridge") + return err + } + clusterUpgradeDetails.AddDataToClusterStatus(crunchybridgecluster) + + if len(clusterUpgradeDetails.Operations) != 0 { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionTrue, + Reason: clusterUpgradeDetails.Operations[0].Flavor, + Message: fmt.Sprintf( + "Performing an upgrade of type %v with a state of %v.", + clusterUpgradeDetails.Operations[0].Flavor, clusterUpgradeDetails.Operations[0].State), + }) + } else { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionFalse, + Reason: "NoUpgradesInProgress", + Message: "No upgrades being performed", + }) + } + + return nil +} + +// 
handleUpgrade handles upgrades that hit the "POST /clusters//upgrade" endpoint +func (r *CrunchyBridgeClusterReconciler) handleUpgrade(ctx context.Context, + apiKey string, + crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) ctrl.Result { + log := ctrl.LoggerFrom(ctx) + + log.Info("Handling upgrade request") + + upgradeRequest := &bridge.PostClustersUpgradeRequestPayload{ + Plan: crunchybridgecluster.Spec.Plan, + PostgresVersion: intstr.FromInt(crunchybridgecluster.Spec.PostgresVersion), + Storage: bridge.ToGibibytes(crunchybridgecluster.Spec.Storage), + } + + clusterUpgrade, err := r.NewClient().UpgradeCluster(ctx, apiKey, + crunchybridgecluster.Status.ID, upgradeRequest) + if err != nil { + // TODO(crunchybridgecluster): consider what errors we might get + // and what different results/requeue times we want to return. + // Currently: don't requeue and wait for user to change spec. + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionFalse, + Reason: "UpgradeError", + Message: fmt.Sprintf( + "Error performing an upgrade: %s", err), + }) + log.Error(err, "Error while attempting cluster upgrade") + return ctrl.Result{} + } + clusterUpgrade.AddDataToClusterStatus(crunchybridgecluster) + + if len(clusterUpgrade.Operations) != 0 { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionTrue, + Reason: clusterUpgrade.Operations[0].Flavor, + Message: fmt.Sprintf( + "Performing an upgrade of type %v with a state of %v.", + clusterUpgrade.Operations[0].Flavor, clusterUpgrade.Operations[0].State), + }) + } + + return runtime.RequeueWithoutBackoff(3 * time.Minute) +} + +// handleUpgradeHA handles upgrades that hit the +// "PUT /clusters//actions/[enable|disable]-ha" endpoint +func (r *CrunchyBridgeClusterReconciler) handleUpgradeHA(ctx context.Context, + apiKey string, + crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) ctrl.Result { + log := ctrl.LoggerFrom(ctx) + + log.Info("Handling HA change request") + + action := "enable-ha" + if !crunchybridgecluster.Spec.IsHA { + action = "disable-ha" + } + + clusterUpgrade, err := r.NewClient().UpgradeClusterHA(ctx, apiKey, crunchybridgecluster.Status.ID, action) + if err != nil { + // TODO(crunchybridgecluster): consider what errors we might get + // and what different results/requeue times we want to return. + // Currently: don't requeue and wait for user to change spec. 
+ meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionFalse, + Reason: "UpgradeError", + Message: fmt.Sprintf( + "Error performing an HA upgrade: %s", err), + }) + log.Error(err, "Error while attempting cluster HA change") + return ctrl.Result{} + } + clusterUpgrade.AddDataToClusterStatus(crunchybridgecluster) + if len(clusterUpgrade.Operations) != 0 { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionTrue, + Reason: clusterUpgrade.Operations[0].Flavor, + Message: fmt.Sprintf( + "Performing an upgrade of type %v with a state of %v.", + clusterUpgrade.Operations[0].Flavor, clusterUpgrade.Operations[0].State), + }) + } + + return runtime.RequeueWithoutBackoff(3 * time.Minute) +} + +// handleUpdate handles upgrades that hit the "PATCH /clusters/" endpoint +func (r *CrunchyBridgeClusterReconciler) handleUpdate(ctx context.Context, + apiKey string, + crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) ctrl.Result { + log := ctrl.LoggerFrom(ctx) + + log.Info("Handling update request") + + updateRequest := &bridge.PatchClustersRequestPayload{ + IsProtected: &crunchybridgecluster.Spec.IsProtected, + Name: crunchybridgecluster.Spec.ClusterName, + } + + clusterUpdate, err := r.NewClient().UpdateCluster(ctx, apiKey, + crunchybridgecluster.Status.ID, updateRequest) + if err != nil { + // TODO(crunchybridgecluster): consider what errors we might get + // and what different results/requeue times we want to return. + // Currently: don't requeue and wait for user to change spec. 
+ meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionFalse, + Reason: "UpgradeError", + Message: fmt.Sprintf( + "Error performing an upgrade: %s", err), + }) + log.Error(err, "Error while attempting cluster update") + return ctrl.Result{} + } + clusterUpdate.AddDataToClusterStatus(crunchybridgecluster) + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionTrue, + Reason: "ClusterUpgrade", + Message: fmt.Sprintf( + "An upgrade is occurring, the clusters name is %v and the cluster is protected is %v.", + clusterUpdate.ClusterName, *clusterUpdate.IsProtected), + }) + + return runtime.RequeueWithoutBackoff(3 * time.Minute) +} + +// GetSecretKeys gets the secret and returns the expected API key and team id +// or an error if either of those fields or the Secret are missing +func (r *CrunchyBridgeClusterReconciler) GetSecretKeys( + ctx context.Context, crunchyBridgeCluster *v1beta1.CrunchyBridgeCluster, +) (string, string, error) { + + existing := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Namespace: crunchyBridgeCluster.GetNamespace(), + Name: crunchyBridgeCluster.Spec.Secret, + }} + + err := errors.WithStack( + r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + + if err == nil { + if existing.Data["key"] != nil && existing.Data["team"] != nil { + return string(existing.Data["key"]), string(existing.Data["team"]), nil + } + err = fmt.Errorf("error handling secret; expected to find a key and a team: found key %t, found team %t", + existing.Data["key"] != nil, + existing.Data["team"] != nil) + } + + return "", "", err +} + +// deleteControlled safely deletes object when it is controlled by cluster. +func (r *CrunchyBridgeClusterReconciler) deleteControlled( + ctx context.Context, crunchyBridgeCluster *v1beta1.CrunchyBridgeCluster, object client.Object, +) error { + if metav1.IsControlledBy(object, crunchyBridgeCluster) { + uid := object.GetUID() + version := object.GetResourceVersion() + exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} + + return r.Client.Delete(ctx, object, exactly) + } + + return nil +} diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go new file mode 100644 index 0000000000..92d6b58d0e --- /dev/null +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go @@ -0,0 +1,834 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "strings" + "testing" + "time" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +var testTeamId = "5678" +var testApiKey = "9012" + +func TestReconcileBridgeConnectionSecret(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + + ns := setupNamespace(t, tClient).Name + cluster := testCluster() + cluster.Namespace = ns + + t.Run("Failure", func(t *testing.T) { + key, team, err := reconciler.reconcileBridgeConnectionSecret(ctx, cluster) + assert.Equal(t, key, "") + assert.Equal(t, team, "") + assert.Check(t, err != nil) + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "SecretInvalid") + assert.Check(t, cmp.Contains(readyCondition.Message, + "The condition of the cluster is unknown because the secret is invalid:")) + } + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, upgradingCondition.Reason, "SecretInvalid") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "The condition of the upgrade(s) is unknown because the secret is invalid:")) + } + }) + + t.Run("ValidSecretFound", func(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "crunchy-bridge-api-key", + Namespace: ns, + }, + Data: map[string][]byte{ + "key": []byte(`asdf`), + "team": []byte(`jkl;`), + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + key, team, err := reconciler.reconcileBridgeConnectionSecret(ctx, cluster) + assert.Equal(t, key, "asdf") + assert.Equal(t, team, "jkl;") + assert.NilError(t, err) + }) +} + +func TestHandleDuplicateClusterName(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + clusterInBridge := testClusterApiResource() + clusterInBridge.ClusterName = "bridge-cluster-1" // originally "hippo-cluster" + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{clusterInBridge}, + } + } + + ns := setupNamespace(t, tClient).Name + + t.Run("FailureToListClusters", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = 
ns + + controllerResult, err := reconciler.handleDuplicateClusterName(ctx, "bad_api_key", testTeamId, cluster) + assert.Check(t, err != nil) + assert.Equal(t, *controllerResult, ctrl.Result{}) + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "UnknownClusterState") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Issue listing existing clusters in Bridge:")) + } + }) + + t.Run("NoDuplicateFound", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + + controllerResult, err := reconciler.handleDuplicateClusterName(ctx, testApiKey, testTeamId, cluster) + assert.NilError(t, err) + assert.Check(t, controllerResult == nil) + }) + + t.Run("DuplicateFoundAdoptionAnnotationNotPresent", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Spec.ClusterName = "bridge-cluster-1" // originally "hippo-cluster" + + controllerResult, err := reconciler.handleDuplicateClusterName(ctx, testApiKey, testTeamId, cluster) + assert.NilError(t, err) + assert.Equal(t, *controllerResult, ctrl.Result{}) + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionFalse) + assert.Equal(t, readyCondition.Reason, "DuplicateClusterName") + assert.Check(t, cmp.Contains(readyCondition.Message, + "A cluster with the same name already exists for this team (Team ID: ")) + } + }) + + t.Run("DuplicateFoundAdoptionAnnotationPresent", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Spec.ClusterName = "bridge-cluster-1" // originally "hippo-cluster" + cluster.Annotations = map[string]string{} + cluster.Annotations[naming.CrunchyBridgeClusterAdoptionAnnotation] = "1234" + + controllerResult, err := reconciler.handleDuplicateClusterName(ctx, testApiKey, testTeamId, cluster) + assert.NilError(t, err) + assert.Equal(t, *controllerResult, ctrl.Result{Requeue: true}) + assert.Equal(t, cluster.Status.ID, "1234") + }) +} + +func TestHandleCreateCluster(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{}, + } + } + + t.Run("SuccessfulCreate", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + + controllerResult := reconciler.handleCreateCluster(ctx, testApiKey, testTeamId, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + assert.Equal(t, cluster.Status.ID, "0") + + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "UnknownClusterState") + assert.Check(t, cmp.Contains(readyCondition.Message, + "The condition of the cluster is unknown.")) + } + + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) 
{ + assert.Equal(t, upgradingCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, upgradingCondition.Reason, "UnknownUpgradeState") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "The condition of the upgrade(s) is unknown.")) + } + }) + + t.Run("UnsuccessfulCreate", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + + controllerResult := reconciler.handleCreateCluster(ctx, "bad_api_key", testTeamId, cluster) + assert.Equal(t, controllerResult, ctrl.Result{}) + assert.Equal(t, cluster.Status.ID, "") + + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionFalse) + assert.Equal(t, readyCondition.Reason, "ClusterInvalid") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Cannot create from spec:")) + } + + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + assert.Check(t, upgradingCondition == nil) + }) +} + +func TestHandleGetCluster(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + firstClusterInBridge := testClusterApiResource() + secondClusterInBridge := testClusterApiResource() + secondClusterInBridge.ID = "2345" // originally "1234" + secondClusterInBridge.ClusterName = "hippo-cluster-2" // originally "hippo-cluster" + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{firstClusterInBridge, secondClusterInBridge}, + } + } + + t.Run("SuccessfulGet", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + + err := reconciler.handleGetCluster(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, cluster.Status.ClusterName, firstClusterInBridge.ClusterName) + assert.Equal(t, cluster.Status.Host, firstClusterInBridge.Host) + assert.Equal(t, cluster.Status.ID, firstClusterInBridge.ID) + assert.Equal(t, cluster.Status.IsHA, firstClusterInBridge.IsHA) + assert.Equal(t, cluster.Status.IsProtected, firstClusterInBridge.IsProtected) + assert.Equal(t, cluster.Status.MajorVersion, firstClusterInBridge.MajorVersion) + assert.Equal(t, cluster.Status.Plan, firstClusterInBridge.Plan) + assert.Equal(t, *cluster.Status.Storage, *bridge.FromGibibytes(firstClusterInBridge.Storage)) + }) + + t.Run("UnsuccessfulGet", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "bad_cluster_id" + + err := reconciler.handleGetCluster(ctx, testApiKey, cluster) + assert.Check(t, err != nil) + + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "UnknownClusterState") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Issue getting cluster information from Bridge:")) + } + }) +} + +func TestHandleGetClusterStatus(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + readyClusterId := "1234" + creatingClusterId := "7890" + 
readyClusterStatusInBridge := testClusterStatusApiResource(readyClusterId) + creatingClusterStatusInBridge := testClusterStatusApiResource(creatingClusterId) + creatingClusterStatusInBridge.State = "creating" // originally "ready" + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + ClusterStatuses: map[string]*bridge.ClusterStatusApiResource{ + readyClusterId: readyClusterStatusInBridge, + creatingClusterId: creatingClusterStatusInBridge, + }, + } + } + + t.Run("SuccessReadyState", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = readyClusterId + + err := reconciler.handleGetClusterStatus(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, cluster.Status.State, "ready") + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionTrue) + assert.Equal(t, readyCondition.Reason, "ready") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Bridge cluster state is ready")) + } + }) + + t.Run("SuccessNonReadyState", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = creatingClusterId + + err := reconciler.handleGetClusterStatus(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, cluster.Status.State, "creating") + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionFalse) + assert.Equal(t, readyCondition.Reason, "creating") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Bridge cluster state is creating")) + } + }) + + t.Run("UnsuccessfulGet", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = creatingClusterId + + err := reconciler.handleGetClusterStatus(ctx, "bad_api_key", cluster) + assert.Check(t, err != nil) + assert.Equal(t, cluster.Status.State, "unknown") + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "UnknownClusterState") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Issue getting cluster status from Bridge:")) + } + }) +} + +func TestHandleGetClusterUpgrade(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + upgradingClusterId := "1234" + notUpgradingClusterId := "7890" + upgradingClusterUpgradeInBridge := testClusterUpgradeApiResource(upgradingClusterId) + notUpgradingClusterUpgradeInBridge := testClusterUpgradeApiResource(notUpgradingClusterId) + notUpgradingClusterUpgradeInBridge.Operations = []*v1beta1.UpgradeOperation{} + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + ClusterUpgrades: map[string]*bridge.ClusterUpgradeApiResource{ + upgradingClusterId: upgradingClusterUpgradeInBridge, + notUpgradingClusterId: 
notUpgradingClusterUpgradeInBridge, + }, + } + } + + t.Run("SuccessUpgrading", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = upgradingClusterId + + err := reconciler.handleGetClusterUpgrade(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "resize", + StartingFrom: "", + State: "in_progress", + }) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "resize") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type resize with a state of in_progress.")) + } + }) + + t.Run("SuccessNotUpgrading", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = notUpgradingClusterId + + err := reconciler.handleGetClusterUpgrade(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, len(cluster.Status.OngoingUpgrade), 0) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionFalse) + assert.Equal(t, upgradingCondition.Reason, "NoUpgradesInProgress") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "No upgrades being performed")) + } + }) + + t.Run("UnsuccessfulGet", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = notUpgradingClusterId + + err := reconciler.handleGetClusterUpgrade(ctx, "bad_api_key", cluster) + assert.Check(t, err != nil) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, upgradingCondition.Reason, "UnknownUpgradeState") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Issue getting cluster upgrade from Bridge:")) + } + }) +} + +func TestHandleUpgrade(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + clusterInBridge := testClusterApiResource() + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{clusterInBridge}, + } + } + + t.Run("UpgradePlan", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.Plan = "standard-16" // originally "standard-8" + + controllerResult := reconciler.handleUpgrade(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "maintenance") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type maintenance with a state of in_progress.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], 
v1beta1.UpgradeOperation{ + Flavor: "maintenance", + StartingFrom: "", + State: "in_progress", + }) + } + }) + + t.Run("UpgradePostgres", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.PostgresVersion = 16 // originally "15" + + controllerResult := reconciler.handleUpgrade(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "major_version_upgrade") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type major_version_upgrade with a state of in_progress.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "major_version_upgrade", + StartingFrom: "", + State: "in_progress", + }) + } + }) + + t.Run("UpgradeStorage", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.Storage = resource.MustParse("15Gi") // originally "10Gi" + + controllerResult := reconciler.handleUpgrade(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "resize") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type resize with a state of in_progress.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "resize", + StartingFrom: "", + State: "in_progress", + }) + } + }) + + t.Run("UpgradeFailure", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.Storage = resource.MustParse("15Gi") // originally "10Gi" + + controllerResult := reconciler.handleUpgrade(ctx, "bad_api_key", cluster) + assert.Equal(t, controllerResult, ctrl.Result{}) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionFalse) + assert.Equal(t, upgradingCondition.Reason, "UpgradeError") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Error performing an upgrade: boom")) + } + }) +} + +func TestHandleUpgradeHA(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + clusterInBridgeWithHaDisabled := testClusterApiResource() + clusterInBridgeWithHaEnabled := testClusterApiResource() + clusterInBridgeWithHaEnabled.ID = "2345" // originally "1234" + clusterInBridgeWithHaEnabled.IsHA = initialize.Bool(true) // originally "false" + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{clusterInBridgeWithHaDisabled, + clusterInBridgeWithHaEnabled}, + } + } + + t.Run("EnableHA", func(t *testing.T) { + cluster := testCluster() + 
cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.IsHA = true // originally "false" + + controllerResult := reconciler.handleUpgradeHA(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "ha_change") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type ha_change with a state of enabling_ha.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "ha_change", + StartingFrom: "", + State: "enabling_ha", + }) + } + }) + + t.Run("DisableHA", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "2345" + + controllerResult := reconciler.handleUpgradeHA(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "ha_change") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type ha_change with a state of disabling_ha.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "ha_change", + StartingFrom: "", + State: "disabling_ha", + }) + } + }) + + t.Run("UpgradeFailure", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + + controllerResult := reconciler.handleUpgradeHA(ctx, "bad_api_key", cluster) + assert.Equal(t, controllerResult, ctrl.Result{}) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionFalse) + assert.Equal(t, upgradingCondition.Reason, "UpgradeError") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Error performing an HA upgrade: boom")) + } + }) +} + +func TestHandleUpdate(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + clusterInBridge := testClusterApiResource() + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{clusterInBridge}, + } + } + + t.Run("UpdateName", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.ClusterName = "new-cluster-name" // originally "hippo-cluster" + + controllerResult := reconciler.handleUpdate(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "ClusterUpgrade") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "An upgrade is 
occurring, the clusters name is new-cluster-name and the cluster is protected is false.")) + } + assert.Equal(t, cluster.Status.ClusterName, "new-cluster-name") + }) + + t.Run("UpdateIsProtected", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.IsProtected = true // originally "false" + + controllerResult := reconciler.handleUpdate(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "ClusterUpgrade") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "An upgrade is occurring, the clusters name is hippo-cluster and the cluster is protected is true.")) + } + assert.Equal(t, *cluster.Status.IsProtected, true) + }) + + t.Run("UpgradeFailure", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.IsProtected = true // originally "false" + + controllerResult := reconciler.handleUpdate(ctx, "bad_api_key", cluster) + assert.Equal(t, controllerResult, ctrl.Result{}) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionFalse) + assert.Equal(t, upgradingCondition.Reason, "UpgradeError") + assert.Check(t, cmp.Contains(upgradingCondition.Message, "Error performing an upgrade: boom")) + } + }) +} + +func TestGetSecretKeys(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + + ns := setupNamespace(t, tClient).Name + cluster := testCluster() + cluster.Namespace = ns + + t.Run("NoSecret", func(t *testing.T) { + apiKey, team, err := reconciler.GetSecretKeys(ctx, cluster) + assert.Equal(t, apiKey, "") + assert.Equal(t, team, "") + assert.ErrorContains(t, err, "secrets \"crunchy-bridge-api-key\" not found") + }) + + t.Run("SecretMissingApiKey", func(t *testing.T) { + cluster.Spec.Secret = "secret-missing-api-key" // originally "crunchy-bridge-api-key" + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret-missing-api-key", + Namespace: ns, + }, + Data: map[string][]byte{ + "team": []byte(`jkl;`), + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + apiKey, team, err := reconciler.GetSecretKeys(ctx, cluster) + assert.Equal(t, apiKey, "") + assert.Equal(t, team, "") + assert.ErrorContains(t, err, "error handling secret; expected to find a key and a team: found key false, found team true") + + assert.NilError(t, tClient.Delete(ctx, secret)) + }) + + t.Run("SecretMissingTeamId", func(t *testing.T) { + cluster.Spec.Secret = "secret-missing-team-id" + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret-missing-team-id", + Namespace: ns, + }, + Data: map[string][]byte{ + "key": []byte(`asdf`), + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + apiKey, team, err := reconciler.GetSecretKeys(ctx, cluster) + assert.Equal(t, apiKey, "") + assert.Equal(t, team, "") + assert.ErrorContains(t, err, "error handling secret; expected to find a key and a team: found key true, found team 
false") + }) + + t.Run("GoodSecret", func(t *testing.T) { + cluster.Spec.Secret = "crunchy-bridge-api-key" + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "crunchy-bridge-api-key", + Namespace: ns, + }, + Data: map[string][]byte{ + "key": []byte(`asdf`), + "team": []byte(`jkl;`), + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + apiKey, team, err := reconciler.GetSecretKeys(ctx, cluster) + assert.Equal(t, apiKey, "asdf") + assert.Equal(t, team, "jkl;") + assert.NilError(t, err) + }) +} + +func TestDeleteControlled(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + ns := setupNamespace(t, tClient) + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Name = strings.ToLower(t.Name()) // originally "hippo-cr" + assert.NilError(t, tClient.Create(ctx, cluster)) + + t.Run("NotControlled", func(t *testing.T) { + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "solo" + + assert.NilError(t, tClient.Create(ctx, secret)) + + // No-op when there's no ownership + assert.NilError(t, reconciler.deleteControlled(ctx, cluster, secret)) + assert.NilError(t, tClient.Get(ctx, client.ObjectKeyFromObject(secret), secret)) + }) + + t.Run("Controlled", func(t *testing.T) { + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "controlled" + + assert.NilError(t, reconciler.setControllerReference(cluster, secret)) + assert.NilError(t, tClient.Create(ctx, secret)) + + // Deletes when controlled by cluster. + assert.NilError(t, reconciler.deleteControlled(ctx, cluster, secret)) + + err := tClient.Get(ctx, client.ObjectKeyFromObject(secret), secret) + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %#v", err) + }) +} diff --git a/internal/bridge/crunchybridgecluster/delete.go b/internal/bridge/crunchybridgecluster/delete.go new file mode 100644 index 0000000000..8dcada31cf --- /dev/null +++ b/internal/bridge/crunchybridgecluster/delete.go @@ -0,0 +1,70 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "time" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +const finalizer = "crunchybridgecluster.postgres-operator.crunchydata.com/finalizer" + +// handleDelete sets a finalizer on cluster and performs the finalization of +// cluster when it is being deleted. It returns (nil, nil) when cluster is +// not being deleted and there are no errors patching the CrunchyBridgeCluster. +// The caller is responsible for returning other values to controller-runtime. 
+func (r *CrunchyBridgeClusterReconciler) handleDelete( + ctx context.Context, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, key string, +) (*ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + + // If the CrunchyBridgeCluster isn't being deleted, add the finalizer + if crunchybridgecluster.ObjectMeta.DeletionTimestamp.IsZero() { + if !controllerutil.ContainsFinalizer(crunchybridgecluster, finalizer) { + controllerutil.AddFinalizer(crunchybridgecluster, finalizer) + if err := r.Update(ctx, crunchybridgecluster); err != nil { + return nil, err + } + } + // If the CrunchyBridgeCluster is being deleted, + // handle the deletion, and remove the finalizer + } else { + if controllerutil.ContainsFinalizer(crunchybridgecluster, finalizer) { + log.Info("deleting cluster", "clusterName", crunchybridgecluster.Spec.ClusterName) + + // TODO(crunchybridgecluster): If is_protected is true, maybe skip this call, but allow the deletion of the K8s object? + _, deletedAlready, err := r.NewClient().DeleteCluster(ctx, key, crunchybridgecluster.Status.ID) + // Requeue if error + if err != nil { + return &ctrl.Result{}, err + } + + if !deletedAlready { + return initialize.Pointer(runtime.RequeueWithoutBackoff(time.Second)), err + } + + // Remove finalizer if deleted already + if deletedAlready { + log.Info("cluster deleted", "clusterName", crunchybridgecluster.Spec.ClusterName) + + controllerutil.RemoveFinalizer(crunchybridgecluster, finalizer) + if err := r.Update(ctx, crunchybridgecluster); err != nil { + return &ctrl.Result{}, err + } + } + } + // Stop reconciliation as the item is being deleted + return &ctrl.Result{}, nil + } + + return nil, nil +} diff --git a/internal/bridge/crunchybridgecluster/delete_test.go b/internal/bridge/crunchybridgecluster/delete_test.go new file mode 100644 index 0000000000..28e6feb1f8 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/delete_test.go @@ -0,0 +1,133 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "testing" + "time" + + "gotest.tools/v3/assert" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/testing/require" +) + +func TestHandleDeleteCluster(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + + firstClusterInBridge := testClusterApiResource() + firstClusterInBridge.ClusterName = "bridge-cluster-1" + secondClusterInBridge := testClusterApiResource() + secondClusterInBridge.ClusterName = "bridge-cluster-2" + secondClusterInBridge.ID = "2345" + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + testBridgeClient := &TestBridgeClient{ + ApiKey: "9012", + TeamId: "5678", + Clusters: []*bridge.ClusterApiResource{firstClusterInBridge, secondClusterInBridge}, + } + reconciler.NewClient = func() bridge.ClientInterface { + return testBridgeClient + } + + t.Run("SuccessfulDeletion", func(t *testing.T) { + // Create test cluster in kubernetes + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.ClusterName = "bridge-cluster-1" + assert.NilError(t, tClient.Create(ctx, cluster)) + + // Run handleDelete + controllerResult, err := reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + assert.Check(t, controllerResult == nil) + + // Make sure that finalizer was added + assert.Check(t, controllerutil.ContainsFinalizer(cluster, finalizer)) + + // Send delete request to kubernetes + assert.NilError(t, tClient.Delete(ctx, cluster)) + + // Get cluster from kubernetes and assert that the deletion timestamp was added + assert.NilError(t, tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)) + assert.Check(t, !cluster.ObjectMeta.DeletionTimestamp.IsZero()) + + // Note: We must run handleDelete multiple times because we don't want to remove the + // finalizer until we're sure that the cluster has been deleted from Bridge, so we + // have to do multiple calls/reconcile loops. 
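+ // In sequence: the first call above added the finalizer, the next call
+ // deletes the cluster from Bridge and requeues, and the final call removes
+ // the finalizer once Bridge no longer reports the cluster.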
+ // Run handleDelete again to delete from Bridge + cluster.Status.ID = "1234" + controllerResult, err = reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + assert.Equal(t, controllerResult.RequeueAfter, 1*time.Second) + assert.Equal(t, len(testBridgeClient.Clusters), 1) + assert.Equal(t, testBridgeClient.Clusters[0].ClusterName, "bridge-cluster-2") + + // Run handleDelete one last time to remove finalizer + controllerResult, err = reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + assert.Equal(t, *controllerResult, ctrl.Result{}) + + // Make sure that finalizer was removed + assert.Check(t, !controllerutil.ContainsFinalizer(cluster, finalizer)) + }) + + t.Run("UnsuccessfulDeletion", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "2345" + cluster.Spec.ClusterName = "bridge-cluster-2" + assert.NilError(t, tClient.Create(ctx, cluster)) + + // Run handleDelete + controllerResult, err := reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + assert.Check(t, controllerResult == nil) + + // Make sure that finalizer was added + assert.Check(t, controllerutil.ContainsFinalizer(cluster, finalizer)) + + // Send delete request to kubernetes + assert.NilError(t, tClient.Delete(ctx, cluster)) + + // Get cluster from kubernetes and assert that the deletion timestamp was added + assert.NilError(t, tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)) + assert.Check(t, !cluster.ObjectMeta.DeletionTimestamp.IsZero()) + + // Run handleDelete again to attempt to delete from Bridge, but provide bad api key + cluster.Status.ID = "2345" + controllerResult, err = reconciler.handleDelete(ctx, cluster, "bad_api_key") + assert.ErrorContains(t, err, "boom") + assert.Equal(t, *controllerResult, ctrl.Result{}) + + // Run handleDelete a couple times with good api key so test can cleanup properly. + // Note: We must run handleDelete multiple times because we don't want to remove the + // finalizer until we're sure that the cluster has been deleted from Bridge, so we + // have to do multiple calls/reconcile loops. + // delete from bridge + _, err = reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + + // remove finalizer + _, err = reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + + // Make sure that finalizer was removed + assert.Check(t, !controllerutil.ContainsFinalizer(cluster, finalizer)) + }) +} diff --git a/internal/bridge/crunchybridgecluster/helpers_test.go b/internal/bridge/crunchybridgecluster/helpers_test.go new file mode 100644 index 0000000000..f40ad3d054 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/helpers_test.go @@ -0,0 +1,178 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "os" + "strconv" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// Scale extends d according to PGO_TEST_TIMEOUT_SCALE. 
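+//
+// For example, with PGO_TEST_TIMEOUT_SCALE=2, Scale(time.Minute) returns two minutes.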
+var Scale = func(d time.Duration) time.Duration { return d } + +// This function was duplicated from the postgrescluster package. +// TODO: Pull these duplicated functions out into a separate, shared package. +func init() { + setting := os.Getenv("PGO_TEST_TIMEOUT_SCALE") + factor, _ := strconv.ParseFloat(setting, 64) + + if setting != "" { + if factor <= 0 { + panic("PGO_TEST_TIMEOUT_SCALE must be a fractional number greater than zero") + } + + Scale = func(d time.Duration) time.Duration { + return time.Duration(factor * float64(d)) + } + } +} + +// setupKubernetes starts or connects to a Kubernetes API and returns a client +// that uses it. See [require.Kubernetes] for more details. +func setupKubernetes(t testing.TB) client.Client { + t.Helper() + + // Start and/or connect to a Kubernetes API, or Skip when that's not configured. + cc := require.Kubernetes(t) + + // Log the status of any test namespaces after this test fails. + t.Cleanup(func() { + if t.Failed() { + var namespaces corev1.NamespaceList + _ = cc.List(context.Background(), &namespaces, client.HasLabels{"postgres-operator-test"}) + + type shaped map[string]corev1.NamespaceStatus + result := make([]shaped, len(namespaces.Items)) + + for i, ns := range namespaces.Items { + result[i] = shaped{ns.Labels["postgres-operator-test"]: ns.Status} + } + + formatted, _ := yaml.Marshal(result) + t.Logf("Test Namespaces:\n%s", formatted) + } + }) + + return cc +} + +// setupNamespace creates a random namespace that will be deleted by t.Cleanup. +// +// Deprecated: Use [require.Namespace] instead. +func setupNamespace(t testing.TB, cc client.Client) *corev1.Namespace { + t.Helper() + return require.Namespace(t, cc) +} + +// testCluster defines a base cluster spec that can be used by tests to +// generate a CrunchyBridgeCluster CR +func testCluster() *v1beta1.CrunchyBridgeCluster { + cluster := v1beta1.CrunchyBridgeCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hippo-cr", + }, + Spec: v1beta1.CrunchyBridgeClusterSpec{ + ClusterName: "hippo-cluster", + IsHA: false, + PostgresVersion: 15, + Plan: "standard-8", + Provider: "aws", + Region: "us-east-2", + Secret: "crunchy-bridge-api-key", + Storage: resource.MustParse("10Gi"), + }, + } + return cluster.DeepCopy() +} + +func testClusterApiResource() *bridge.ClusterApiResource { + cluster := bridge.ClusterApiResource{ + ID: "1234", + Host: "example.com", + IsHA: initialize.Bool(false), + IsProtected: initialize.Bool(false), + MajorVersion: 15, + ClusterName: "hippo-cluster", + Plan: "standard-8", + Provider: "aws", + Region: "us-east-2", + Storage: 10, + Team: "5678", + } + return &cluster +} + +func testClusterStatusApiResource(clusterId string) *bridge.ClusterStatusApiResource { + teamId := "5678" + state := "ready" + + clusterStatus := bridge.ClusterStatusApiResource{ + DiskUsage: &bridge.ClusterDiskUsageApiResource{ + DiskAvailableMB: 16, + DiskTotalSizeMB: 16, + DiskUsedMB: 0, + }, + OldestBackup: "oldbackup", + OngoingUpgrade: &bridge.ClusterUpgradeApiResource{ + ClusterID: clusterId, + Operations: []*v1beta1.UpgradeOperation{}, + Team: teamId, + }, + State: state, + } + + return &clusterStatus +} + +func testClusterUpgradeApiResource(clusterId string) *bridge.ClusterUpgradeApiResource { + teamId := "5678" + + clusterUpgrade := bridge.ClusterUpgradeApiResource{ + ClusterID: clusterId, + Operations: []*v1beta1.UpgradeOperation{ + { + Flavor: "resize", + StartingFrom: "", + State: "in_progress", + }, + }, + Team: teamId, + } + + return &clusterUpgrade +} + +func 
testClusterRoleApiResource() *bridge.ClusterRoleApiResource { + clusterId := "1234" + teamId := "5678" + roleName := "application" + + clusterRole := bridge.ClusterRoleApiResource{ + AccountEmail: "test@email.com", + AccountId: "12345678", + ClusterId: clusterId, + Flavor: "chocolate", + Name: roleName, + Password: "application-password", + Team: teamId, + URI: "connection-string", + } + + return &clusterRole +} diff --git a/internal/bridge/crunchybridgecluster/mock_bridge_api.go b/internal/bridge/crunchybridgecluster/mock_bridge_api.go new file mode 100644 index 0000000000..5c6b243714 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/mock_bridge_api.go @@ -0,0 +1,247 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/initialize" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +type TestBridgeClient struct { + ApiKey string `json:"apiKey,omitempty"` + TeamId string `json:"teamId,omitempty"` + Clusters []*bridge.ClusterApiResource `json:"clusters,omitempty"` + ClusterRoles []*bridge.ClusterRoleApiResource `json:"clusterRoles,omitempty"` + ClusterStatuses map[string]*bridge.ClusterStatusApiResource `json:"clusterStatuses,omitempty"` + ClusterUpgrades map[string]*bridge.ClusterUpgradeApiResource `json:"clusterUpgrades,omitempty"` +} + +func (tbc *TestBridgeClient) ListClusters(ctx context.Context, apiKey, teamId string) ([]*bridge.ClusterApiResource, error) { + + if apiKey == tbc.ApiKey && teamId == tbc.TeamId { + return tbc.Clusters, nil + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) UpgradeCluster(ctx context.Context, apiKey, id string, clusterRequestPayload *bridge.PostClustersUpgradeRequestPayload, +) (*bridge.ClusterUpgradeApiResource, error) { + // look for cluster + var desiredCluster *bridge.ClusterApiResource + clusterFound := false + for _, cluster := range tbc.Clusters { + if cluster.ID == id { + desiredCluster = cluster + clusterFound = true + } + } + if !clusterFound { + return nil, errors.New("cluster not found") + } + + // happy path + if apiKey == tbc.ApiKey { + result := &bridge.ClusterUpgradeApiResource{ + ClusterID: id, + Team: tbc.TeamId, + } + if clusterRequestPayload.Plan != desiredCluster.Plan { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "maintenance", + StartingFrom: "", + State: "in_progress", + }, + } + } else if clusterRequestPayload.PostgresVersion != intstr.FromInt(desiredCluster.MajorVersion) { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "major_version_upgrade", + StartingFrom: "", + State: "in_progress", + }, + } + } else if clusterRequestPayload.Storage != desiredCluster.Storage { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "resize", + StartingFrom: "", + State: "in_progress", + }, + } + } + return result, nil + } + // sad path + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) UpgradeClusterHA(ctx context.Context, apiKey, id, action string, +) (*bridge.ClusterUpgradeApiResource, error) { + // look for cluster + var desiredCluster *bridge.ClusterApiResource + clusterFound := false + for _, cluster := range tbc.Clusters { + if cluster.ID == id { + desiredCluster = cluster + clusterFound = true + } + } + if 
!clusterFound { + return nil, errors.New("cluster not found") + } + + // happy path + if apiKey == tbc.ApiKey { + result := &bridge.ClusterUpgradeApiResource{ + ClusterID: id, + Team: tbc.TeamId, + } + if action == "enable-ha" && !*desiredCluster.IsHA { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "ha_change", + StartingFrom: "", + State: "enabling_ha", + }, + } + } else if action == "disable-ha" && *desiredCluster.IsHA { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "ha_change", + StartingFrom: "", + State: "disabling_ha", + }, + } + } else { + return nil, errors.New("no change detected") + } + return result, nil + } + // sad path + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) UpdateCluster(ctx context.Context, apiKey, id string, clusterRequestPayload *bridge.PatchClustersRequestPayload, +) (*bridge.ClusterApiResource, error) { + // look for cluster + var desiredCluster *bridge.ClusterApiResource + clusterFound := false + for _, cluster := range tbc.Clusters { + if cluster.ID == id { + desiredCluster = cluster + clusterFound = true + } + } + if !clusterFound { + return nil, errors.New("cluster not found") + } + + // happy path + if apiKey == tbc.ApiKey { + desiredCluster.ClusterName = clusterRequestPayload.Name + desiredCluster.IsProtected = clusterRequestPayload.IsProtected + return desiredCluster, nil + } + // sad path + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) CreateCluster(ctx context.Context, apiKey string, + clusterRequestPayload *bridge.PostClustersRequestPayload) (*bridge.ClusterApiResource, error) { + + if apiKey == tbc.ApiKey && clusterRequestPayload.Team == tbc.TeamId && clusterRequestPayload.Name != "" && + clusterRequestPayload.Plan != "" { + cluster := &bridge.ClusterApiResource{ + ID: fmt.Sprint(len(tbc.Clusters)), + Host: "example.com", + IsHA: initialize.Bool(clusterRequestPayload.IsHA), + MajorVersion: clusterRequestPayload.PostgresVersion.IntValue(), + ClusterName: clusterRequestPayload.Name, + Plan: clusterRequestPayload.Plan, + Provider: clusterRequestPayload.Provider, + Region: clusterRequestPayload.Region, + Storage: clusterRequestPayload.Storage, + } + tbc.Clusters = append(tbc.Clusters, cluster) + + return cluster, nil + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) GetCluster(ctx context.Context, apiKey, id string) (*bridge.ClusterApiResource, error) { + + if apiKey == tbc.ApiKey { + for _, cluster := range tbc.Clusters { + if cluster.ID == id { + return cluster, nil + } + } + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) GetClusterStatus(ctx context.Context, apiKey, id string) (*bridge.ClusterStatusApiResource, error) { + + if apiKey == tbc.ApiKey { + return tbc.ClusterStatuses[id], nil + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) GetClusterUpgrade(ctx context.Context, apiKey, id string) (*bridge.ClusterUpgradeApiResource, error) { + + if apiKey == tbc.ApiKey { + return tbc.ClusterUpgrades[id], nil + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) GetClusterRole(ctx context.Context, apiKey, clusterId, roleName string) (*bridge.ClusterRoleApiResource, error) { + + if apiKey == tbc.ApiKey { + for _, clusterRole := range tbc.ClusterRoles { + if clusterRole.ClusterId == clusterId && clusterRole.Name == roleName { + return clusterRole, nil + } + } + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) DeleteCluster(ctx context.Context, apiKey, clusterId string) 
(*bridge.ClusterApiResource, bool, error) { + alreadyDeleted := true + var cluster *bridge.ClusterApiResource + + if apiKey == tbc.ApiKey { + for i := len(tbc.Clusters) - 1; i >= 0; i-- { + if tbc.Clusters[i].ID == clusterId { + cluster = tbc.Clusters[i] + alreadyDeleted = false + tbc.Clusters = append(tbc.Clusters[:i], tbc.Clusters[i+1:]...) + return cluster, alreadyDeleted, nil + } + } + } else { + return nil, alreadyDeleted, errors.New("boom") + } + + return nil, alreadyDeleted, nil +} diff --git a/internal/bridge/crunchybridgecluster/postgres.go b/internal/bridge/crunchybridgecluster/postgres.go new file mode 100644 index 0000000000..024631de67 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/postgres.go @@ -0,0 +1,164 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// generatePostgresRoleSecret returns a Secret containing a password and +// connection details for the appropriate database. +func (r *CrunchyBridgeClusterReconciler) generatePostgresRoleSecret( + cluster *v1beta1.CrunchyBridgeCluster, roleSpec *v1beta1.CrunchyBridgeClusterRoleSpec, + clusterRole *bridge.ClusterRoleApiResource, +) (*corev1.Secret, error) { + roleName := roleSpec.Name + secretName := roleSpec.SecretName + intent := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: secretName, + }} + intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) + intent.StringData = map[string]string{ + "name": clusterRole.Name, + "password": clusterRole.Password, + "uri": clusterRole.URI, + } + + intent.Annotations = cluster.Spec.Metadata.GetAnnotationsOrNil() + intent.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleCrunchyBridgeClusterPostgresRole, + naming.LabelCrunchyBridgeClusterPostgresRole: roleName, + }) + + err := errors.WithStack(r.setControllerReference(cluster, intent)) + + return intent, err +} + +// reconcilePostgresRoles writes the objects necessary to manage roles and their +// passwords in PostgreSQL. 
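+//
+// Each spec role is materialized as its own Secret; the data keys written by
+// generatePostgresRoleSecret look roughly like this (illustrative values):
+//
+//	name:     application
+//	password: <password returned by the Bridge API>
+//	uri:      postgres://application:...@example.com:5432/postgres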
+func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoles( + ctx context.Context, apiKey string, cluster *v1beta1.CrunchyBridgeCluster, +) error { + _, _, err := r.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + + // TODO: If we ever add a PgAdmin feature to CrunchyBridgeCluster, we will + // want to add the role credentials to PgAdmin here + + return err +} + +func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoleSecrets( + ctx context.Context, apiKey string, cluster *v1beta1.CrunchyBridgeCluster, +) ( + []*v1beta1.CrunchyBridgeClusterRoleSpec, map[string]*corev1.Secret, error, +) { + log := ctrl.LoggerFrom(ctx) + specRoles := cluster.Spec.Roles + + // Index role specifications by PostgreSQL role name and make sure that none of the + // secretNames are identical in the spec + secretNames := make(map[string]bool) + roleSpecs := make(map[string]*v1beta1.CrunchyBridgeClusterRoleSpec, len(specRoles)) + for i := range specRoles { + if secretNames[specRoles[i].SecretName] { + // Duplicate secretName found, return early with error + err := errors.New("Two or more of the Roles in the CrunchyBridgeCluster spec " + + "have the same SecretName. Role SecretNames must be unique.") + return nil, nil, err + } + secretNames[specRoles[i].SecretName] = true + + roleSpecs[specRoles[i].Name] = specRoles[i] + } + + // Make sure that this cluster's role secret names are not being used by any other + // secrets in the namespace + allSecretsInNamespace := &corev1.SecretList{} + err := errors.WithStack(r.Client.List(ctx, allSecretsInNamespace, client.InNamespace(cluster.Namespace))) + if err != nil { + return nil, nil, err + } + for _, secret := range allSecretsInNamespace.Items { + if secretNames[secret.Name] { + existingSecretLabels := secret.GetLabels() + if existingSecretLabels[naming.LabelCluster] != cluster.Name || + existingSecretLabels[naming.LabelRole] != naming.RoleCrunchyBridgeClusterPostgresRole { + err = errors.New( + fmt.Sprintf("There is already an existing Secret in this namespace with the name %v. "+ + "Please choose a different name for this role's Secret.", secret.Name), + ) + return nil, nil, err + } + } + } + + // Gather existing role secrets + secrets := &corev1.SecretList{} + selector, err := naming.AsSelector(naming.CrunchyBridgeClusterPostgresRoles(cluster.Name)) + if err == nil { + err = errors.WithStack( + r.Client.List(ctx, secrets, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selector}, + )) + } + + // Index secrets by PostgreSQL role name and delete any that are not in the + // cluster spec. + roleSecrets := make(map[string]*corev1.Secret, len(secrets.Items)) + if err == nil { + for i := range secrets.Items { + secret := &secrets.Items[i] + secretRoleName := secret.Labels[naming.LabelCrunchyBridgeClusterPostgresRole] + + roleSpec, specified := roleSpecs[secretRoleName] + if specified && roleSpec.SecretName == secret.Name { + roleSecrets[secretRoleName] = secret + } else if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, secret)) + } + } + } + + // Reconcile each PostgreSQL role in the cluster spec. + for roleName, role := range roleSpecs { + // Get ClusterRole from Bridge API + clusterRole, err := r.NewClient().GetClusterRole(ctx, apiKey, cluster.Status.ID, roleName) + // If issue with getting ClusterRole, log error and move on to next role + if err != nil { + // TODO (dsessler7): Emit event here? 
+ log.Error(err, "issue retrieving cluster role from Bridge") + continue + } + if err == nil { + roleSecrets[roleName], err = r.generatePostgresRoleSecret(cluster, role, clusterRole) + } + if err == nil { + err = errors.WithStack(r.apply(ctx, roleSecrets[roleName])) + } + if err != nil { + log.Error(err, "Issue creating role secret.") + } + } + + return specRoles, roleSecrets, err +} diff --git a/internal/bridge/crunchybridgecluster/postgres_test.go b/internal/bridge/crunchybridgecluster/postgres_test.go new file mode 100644 index 0000000000..66add7b789 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/postgres_test.go @@ -0,0 +1,239 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "testing" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestGeneratePostgresRoleSecret(t *testing.T) { + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + + cluster := testCluster() + cluster.Namespace = setupNamespace(t, tClient).Name + + spec := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "application-role-secret", + } + role := &bridge.ClusterRoleApiResource{ + Name: "application", + Password: "password", + URI: "postgres://application:password@example.com:5432/postgres", + } + t.Run("ObjectMeta", func(t *testing.T) { + secret, err := reconciler.generatePostgresRoleSecret(cluster, spec, role) + assert.NilError(t, err) + + if assert.Check(t, secret != nil) { + assert.Equal(t, secret.Namespace, cluster.Namespace) + assert.Assert(t, metav1.IsControlledBy(secret, cluster)) + assert.DeepEqual(t, secret.Labels, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "hippo-cr", + "postgres-operator.crunchydata.com/role": "cbc-pgrole", + "postgres-operator.crunchydata.com/cbc-pgrole": "application", + }) + } + }) + + t.Run("Data", func(t *testing.T) { + secret, err := reconciler.generatePostgresRoleSecret(cluster, spec, role) + assert.NilError(t, err) + + if assert.Check(t, secret != nil) { + assert.Equal(t, secret.StringData["name"], "application") + assert.Equal(t, secret.StringData["password"], "password") + assert.Equal(t, secret.StringData["uri"], + "postgres://application:password@example.com:5432/postgres") + } + }) +} + +func TestReconcilePostgresRoleSecrets(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + apiKey := "9012" + ns := setupNamespace(t, tClient).Name + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + + t.Run("DuplicateSecretNameInSpec", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + + spec1 := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "role-secret", + } + spec2 := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "postgres", + SecretName: "role-secret", + } + cluster.Spec.Roles = append(cluster.Spec.Roles, spec1, spec2) + + 
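+ // Both role specs above intentionally share the SecretName "role-secret",
+ // so reconcilePostgresRoleSecrets is expected to fail fast on its duplicate
+ // check, before listing Secrets or contacting the Bridge API.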
roleSpecSlice, secretMap, err := reconciler.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + assert.Check(t, roleSpecSlice == nil) + assert.Check(t, secretMap == nil) + assert.ErrorContains(t, err, "Two or more of the Roles in the CrunchyBridgeCluster spec have "+ + "the same SecretName. Role SecretNames must be unique.", "expected duplicate secret name error") + }) + + t.Run("DuplicateSecretNameInNamespace", func(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "role-secret", + Namespace: ns, + }, + StringData: map[string]string{ + "path": "stuff", + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + cluster := testCluster() + cluster.Namespace = ns + + spec1 := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "role-secret", + } + + cluster.Spec.Roles = append(cluster.Spec.Roles, spec1) + + roleSpecSlice, secretMap, err := reconciler.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + assert.Check(t, roleSpecSlice == nil) + assert.Check(t, secretMap == nil) + assert.ErrorContains(t, err, "There is already an existing Secret in this namespace with the name role-secret. "+ + "Please choose a different name for this role's Secret.", "expected duplicate secret name error") + }) + + t.Run("UnusedSecretsGetRemoved", func(t *testing.T) { + applicationRoleInBridge := testClusterRoleApiResource() + postgresRoleInBridge := testClusterRoleApiResource() + postgresRoleInBridge.Name = "postgres" + postgresRoleInBridge.Password = "postgres-password" + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: apiKey, + TeamId: "5678", + ClusterRoles: []*bridge.ClusterRoleApiResource{applicationRoleInBridge, postgresRoleInBridge}, + } + } + + applicationSpec := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "application-role-secret", + } + postgresSpec := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "postgres", + SecretName: "postgres-role-secret", + } + + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + // Add one role to cluster spec + cluster.Spec.Roles = append(cluster.Spec.Roles, applicationSpec) + assert.NilError(t, tClient.Create(ctx, cluster)) + + applicationRole := &bridge.ClusterRoleApiResource{ + Name: "application", + Password: "application-password", + URI: "connection-string", + } + postgresRole := &bridge.ClusterRoleApiResource{ + Name: "postgres", + Password: "postgres-password", + URI: "connection-string", + } + + // Generate secrets + applicationSecret, err := reconciler.generatePostgresRoleSecret(cluster, applicationSpec, applicationRole) + assert.NilError(t, err) + postgresSecret, err := reconciler.generatePostgresRoleSecret(cluster, postgresSpec, postgresRole) + assert.NilError(t, err) + + // Create secrets in k8s + assert.NilError(t, tClient.Create(ctx, applicationSecret)) + assert.NilError(t, tClient.Create(ctx, postgresSecret)) + + roleSpecSlice, secretMap, err := reconciler.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + assert.Check(t, roleSpecSlice != nil) + assert.Check(t, secretMap != nil) + assert.NilError(t, err) + + // Assert that postgresSecret was deleted since its associated role is not in the spec + err = tClient.Get(ctx, client.ObjectKeyFromObject(postgresSecret), postgresSecret) + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %#v", err) + + // Assert that applicationSecret is still there + err = tClient.Get(ctx, client.ObjectKeyFromObject(applicationSecret), 
applicationSecret) + assert.NilError(t, err) + }) + + t.Run("SecretsGetUpdated", func(t *testing.T) { + clusterRoleInBridge := testClusterRoleApiResource() + clusterRoleInBridge.Password = "different-password" + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: apiKey, + TeamId: "5678", + ClusterRoles: []*bridge.ClusterRoleApiResource{clusterRoleInBridge}, + } + } + + cluster := testCluster() + cluster.Namespace = ns + err := tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster) + assert.NilError(t, err) + cluster.Status.ID = "1234" + + spec1 := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "application-role-secret", + } + role1 := &bridge.ClusterRoleApiResource{ + Name: "application", + Password: "test", + URI: "connection-string", + } + // Generate secret + secret1, err := reconciler.generatePostgresRoleSecret(cluster, spec1, role1) + assert.NilError(t, err) + + roleSpecSlice, secretMap, err := reconciler.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + assert.Check(t, roleSpecSlice != nil) + assert.Check(t, secretMap != nil) + assert.NilError(t, err) + + // Assert that secret1 was updated + err = tClient.Get(ctx, client.ObjectKeyFromObject(secret1), secret1) + assert.NilError(t, err) + assert.Equal(t, string(secret1.Data["password"]), "different-password") + }) +} diff --git a/internal/bridge/crunchybridgecluster/watches.go b/internal/bridge/crunchybridgecluster/watches.go new file mode 100644 index 0000000000..79687b3476 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/watches.go @@ -0,0 +1,103 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// watchForRelatedSecret handles create/update/delete events for secrets, +// passing the Secret ObjectKey to findCrunchyBridgeClustersForSecret +func (r *CrunchyBridgeClusterReconciler) watchForRelatedSecret() handler.EventHandler { + handle := func(ctx context.Context, secret client.Object, q workqueue.RateLimitingInterface) { + key := client.ObjectKeyFromObject(secret) + + for _, cluster := range r.findCrunchyBridgeClustersForSecret(ctx, key) { + q.Add(ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(cluster), + }) + } + } + + return handler.Funcs{ + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.ObjectNew, q) + }, + // If the secret is deleted, we want to reconcile + // in order to emit an event/status about this problem. + // We will also emit a matching event/status about this problem + // when we reconcile the cluster and can't find the secret. + // That way, users will get two alerts: one when the secret is deleted + // and another when the cluster is being reconciled. 
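+ // Note: handler.Funcs treats any unset function as a no-op, so generic
+ // events are not handled by this handler.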
+ DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + } +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={list} + +// findCrunchyBridgeClustersForSecret returns CrunchyBridgeClusters +// that are connected to the Secret +func (r *CrunchyBridgeClusterReconciler) findCrunchyBridgeClustersForSecret( + ctx context.Context, secret client.ObjectKey, +) []*v1beta1.CrunchyBridgeCluster { + var matching []*v1beta1.CrunchyBridgeCluster + var clusters v1beta1.CrunchyBridgeClusterList + + // NOTE: If this becomes slow due to a large number of CrunchyBridgeClusters in a single + // namespace, we can configure the [ctrl.Manager] field indexer and pass a + // [fields.Selector] here. + // - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html + if err := r.List(ctx, &clusters, &client.ListOptions{ + Namespace: secret.Namespace, + }); err == nil { + for i := range clusters.Items { + if clusters.Items[i].Spec.Secret == secret.Name { + matching = append(matching, &clusters.Items[i]) + } + } + } + return matching +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={list} + +// Watch enqueues all existing CrunchyBridgeClusters for reconciles. +func (r *CrunchyBridgeClusterReconciler) Watch() handler.EventHandler { + return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []reconcile.Request { + log := ctrl.LoggerFrom(ctx) + + crunchyBridgeClusterList := &v1beta1.CrunchyBridgeClusterList{} + if err := r.List(ctx, crunchyBridgeClusterList); err != nil { + log.Error(err, "Error listing CrunchyBridgeClusters.") + } + + reconcileRequests := []reconcile.Request{} + for index := range crunchyBridgeClusterList.Items { + reconcileRequests = append(reconcileRequests, + reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject( + &crunchyBridgeClusterList.Items[index], + ), + }, + ) + } + + return reconcileRequests + }) +} diff --git a/internal/bridge/crunchybridgecluster/watches_test.go b/internal/bridge/crunchybridgecluster/watches_test.go new file mode 100644 index 0000000000..48dba2ba14 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/watches_test.go @@ -0,0 +1,84 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/testing/require" +) + +func TestFindCrunchyBridgeClustersForSecret(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient) + reconciler := &CrunchyBridgeClusterReconciler{Client: tClient} + + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "crunchy-bridge-api-key" + + assert.NilError(t, tClient.Create(ctx, secret)) + secretObjectKey := client.ObjectKeyFromObject(secret) + + t.Run("NoClusters", func(t *testing.T) { + clusters := reconciler.findCrunchyBridgeClustersForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(clusters), 0) + }) + + t.Run("OneCluster", func(t *testing.T) { + cluster1 := testCluster() + cluster1.Namespace = ns.Name + cluster1.Name = "first-cluster" + assert.NilError(t, tClient.Create(ctx, cluster1)) + + clusters := reconciler.findCrunchyBridgeClustersForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(clusters), 1) + assert.Equal(t, clusters[0].Name, "first-cluster") + }) + + t.Run("TwoClusters", func(t *testing.T) { + cluster2 := testCluster() + cluster2.Namespace = ns.Name + cluster2.Name = "second-cluster" + assert.NilError(t, tClient.Create(ctx, cluster2)) + clusters := reconciler.findCrunchyBridgeClustersForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(clusters), 2) + clusterCount := map[string]int{} + for _, cluster := range clusters { + clusterCount[cluster.Name] += 1 + } + assert.Equal(t, clusterCount["first-cluster"], 1) + assert.Equal(t, clusterCount["second-cluster"], 1) + }) + + t.Run("ClusterWithDifferentSecretNameNotIncluded", func(t *testing.T) { + cluster3 := testCluster() + cluster3.Namespace = ns.Name + cluster3.Name = "third-cluster" + cluster3.Spec.Secret = "different-secret-name" + assert.NilError(t, tClient.Create(ctx, cluster3)) + clusters := reconciler.findCrunchyBridgeClustersForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(clusters), 2) + clusterCount := map[string]int{} + for _, cluster := range clusters { + clusterCount[cluster.Name] += 1 + } + assert.Equal(t, clusterCount["first-cluster"], 1) + assert.Equal(t, clusterCount["second-cluster"], 1) + assert.Equal(t, clusterCount["third-cluster"], 0) + }) +} diff --git a/internal/bridge/installation.go b/internal/bridge/installation.go new file mode 100644 index 0000000000..c76a073348 --- /dev/null +++ b/internal/bridge/installation.go @@ -0,0 +1,280 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "context" + "encoding/json" + "errors" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + corev1apply "k8s.io/client-go/applyconfigurations/core/v1" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" +) + +// self is a singleton Installation. See [InstallationReconciler]. +var self = new(struct { + Installation + sync.RWMutex +}) + +type AuthObject struct { + ID string `json:"id"` + ExpiresAt time.Time `json:"expires_at"` + Secret string `json:"secret"` +} + +type Installation struct { + ID string `json:"id"` + AuthObject AuthObject `json:"auth_object"` +} + +type InstallationReconciler struct { + Owner client.FieldOwner + Reader interface { + Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error + } + Writer interface { + Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error + } + + // Refresh is the frequency at which AuthObjects should be renewed. + Refresh time.Duration + + // SecretRef is the name of the corev1.Secret in which to store Bridge tokens. + SecretRef client.ObjectKey + + // NewClient is called each time a new Client is needed. + NewClient func() *Client +} + +// ManagedInstallationReconciler creates an [InstallationReconciler] and adds it to m. +func ManagedInstallationReconciler(m manager.Manager, newClient func() *Client) error { + kubernetes := m.GetClient() + reconciler := &InstallationReconciler{ + Owner: naming.ControllerBridge, + Reader: kubernetes, + Writer: kubernetes, + Refresh: 2 * time.Hour, + SecretRef: naming.AsObjectKey(naming.OperatorConfigurationSecret()), + NewClient: newClient, + } + + // NOTE: This name was selected to show something interesting in the logs. + // The default is "secret". + // TODO: Pick this name considering metrics and other controllers. + return builder.ControllerManagedBy(m).Named("installation"). + // + // Reconcile the one Secret that holds Bridge tokens. + For(&corev1.Secret{}, builder.WithPredicates( + predicate.NewPredicateFuncs(func(secret client.Object) bool { + return client.ObjectKeyFromObject(secret) == reconciler.SecretRef + }), + )). + // + // Wake periodically even when that Secret does not exist. + WatchesRawSource( + runtime.NewTickerImmediate(time.Hour, event.GenericEvent{}, + handler.EnqueueRequestsFromMapFunc( + func(context.Context, client.Object) []reconcile.Request { + return []reconcile.Request{{NamespacedName: reconciler.SecretRef}} + }, + ), + ), + ). + // + Complete(reconciler) +} + +func (r *InstallationReconciler) Reconcile( + ctx context.Context, request reconcile.Request) (reconcile.Result, error, +) { + result := reconcile.Result{} + secret := &corev1.Secret{} + err := client.IgnoreNotFound(r.Reader.Get(ctx, request.NamespacedName, secret)) + + if err == nil { + // It is easier later to treat a missing Secret the same as one that exists + // and is empty. 
Fill in the metadata with information from the request to + // make it so. + secret.Namespace, secret.Name = request.Namespace, request.Name + + result.RequeueAfter, err = r.reconcile(ctx, secret) + } + + // Nothing can be written to a deleted namespace. + if err != nil && apierrors.HasStatusCause(err, corev1.NamespaceTerminatingCause) { + return runtime.ErrorWithoutBackoff(err) + } + + // Write conflicts are returned as errors; log and retry with backoff. + if err != nil && apierrors.IsConflict(err) { + logging.FromContext(ctx).Info("Requeue", "reason", err) + return runtime.RequeueWithBackoff(), nil + } + + return result, err +} + +// reconcile looks for an Installation in read and stores it or another in +// the [self] singleton after a successful response from the Bridge API. +func (r *InstallationReconciler) reconcile( + ctx context.Context, read *corev1.Secret) (next time.Duration, err error, +) { + write, err := corev1apply.ExtractSecret(read, string(r.Owner)) + if err != nil { + return 0, err + } + + // We GET-extract-PATCH the Secret and do not build it up from scratch. + // Send the ResourceVersion from the GET in the body of every PATCH. + if len(read.ResourceVersion) != 0 { + write.WithResourceVersion(read.ResourceVersion) + } + + // Read the Installation from the Secret, if any. + var installation Installation + if yaml.Unmarshal(read.Data[KeyBridgeToken], &installation) != nil { + installation = Installation{} + } + + // When the Secret lacks an Installation, write the one we have in memory + // or register with the API for a new one. In both cases, we write to the + // Secret which triggers another reconcile. + if len(installation.ID) == 0 { + if len(self.ID) == 0 { + return 0, r.register(ctx, write) + } + + data := map[string][]byte{} + data[KeyBridgeToken], _ = json.Marshal(self.Installation) //nolint:errchkjson + + return 0, r.persist(ctx, write.WithData(data)) + } + + // Read the timestamp from the Secret, if any. + var touched time.Time + if yaml.Unmarshal(read.Data[KeyBridgeLocalTime], &touched) != nil { + touched = time.Time{} + } + + // Refresh the AuthObject when there is no Installation in memory, + // there is no timestamp, or the timestamp is far away. This writes to + // the Secret which triggers another reconcile. + if len(self.ID) == 0 || time.Since(touched) > r.Refresh || time.Until(touched) > r.Refresh { + return 0, r.refresh(ctx, installation, write) + } + + // Trigger another reconcile one interval after the stored timestamp. + return wait.Jitter(time.Until(touched.Add(r.Refresh)), 0.1), nil +} + +// persist uses Server-Side Apply to write config to Kubernetes. The Name and +// Namespace fields cannot be nil. +func (r *InstallationReconciler) persist( + ctx context.Context, config *corev1apply.SecretApplyConfiguration, +) error { + data, err := json.Marshal(config) + apply := client.RawPatch(client.Apply.Type(), data) + + // [client.Client] decides where to write by looking at the underlying type, + // namespace, and name of its [client.Object] argument. That is also where + // it stores the API response. + target := corev1.Secret{} + target.Namespace, target.Name = *config.Namespace, *config.Name + + if err == nil { + err = r.Writer.Patch(ctx, &target, apply, r.Owner, client.ForceOwnership) + } + + return err +} + +// refresh calls the Bridge API to refresh the AuthObject of installation. It +// combines the result with installation and stores that in the [self] singleton +// and the write object in Kubernetes. 
The Name and Namespace fields of the +// latter cannot be nil. +func (r *InstallationReconciler) refresh( + ctx context.Context, installation Installation, + write *corev1apply.SecretApplyConfiguration, +) error { + result, err := r.NewClient().CreateAuthObject(ctx, installation.AuthObject) + + // An authentication error means the installation is irrecoverably expired. + // Remove it from the singleton and move it to a dated entry in the Secret. + if err != nil && errors.Is(err, errAuthentication) { + self.Lock() + self.Installation = Installation{} + self.Unlock() + + keyExpiration := KeyBridgeToken + + installation.AuthObject.ExpiresAt.UTC().Format("--2006-01-02") + + data := make(map[string][]byte, 2) + data[KeyBridgeToken] = nil + data[keyExpiration], _ = json.Marshal(installation) //nolint:errchkjson + + return r.persist(ctx, write.WithData(data)) + } + + if err == nil { + installation.AuthObject = result + + // Store the new value in the singleton. + self.Lock() + self.Installation = installation + self.Unlock() + + // Store the new value in the Secret along with the current time. + data := make(map[string][]byte, 2) + data[KeyBridgeLocalTime], _ = metav1.Now().MarshalJSON() + data[KeyBridgeToken], _ = json.Marshal(installation) //nolint:errchkjson + + err = r.persist(ctx, write.WithData(data)) + } + + return err +} + +// register calls the Bridge API to register a new Installation. It stores the +// result in the [self] singleton and the write object in Kubernetes. The Name +// and Namespace fields of the latter cannot be nil. +func (r *InstallationReconciler) register( + ctx context.Context, write *corev1apply.SecretApplyConfiguration, +) error { + installation, err := r.NewClient().CreateInstallation(ctx) + + if err == nil { + // Store the new value in the singleton. + self.Lock() + self.Installation = installation + self.Unlock() + + // Store the new value in the Secret along with the current time. + data := make(map[string][]byte, 2) + data[KeyBridgeLocalTime], _ = metav1.Now().MarshalJSON() + data[KeyBridgeToken], _ = json.Marshal(installation) //nolint:errchkjson + + err = r.persist(ctx, write.WithData(data)) + } + + return err +} diff --git a/internal/bridge/installation_test.go b/internal/bridge/installation_test.go new file mode 100644 index 0000000000..96223a2233 --- /dev/null +++ b/internal/bridge/installation_test.go @@ -0,0 +1,491 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" + "time" + + "gotest.tools/v3/assert" + cmpopt "gotest.tools/v3/assert/opt" + corev1 "k8s.io/api/core/v1" + corev1apply "k8s.io/client-go/applyconfigurations/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" +) + +func TestExtractSecretContract(t *testing.T) { + // We expect ExtractSecret to populate GVK, Namespace, and Name. 
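+ // These subtests pin down the corev1apply.ExtractSecret behavior that
+ // reconcile depends on: the extracted apply configuration carries the type
+ // and object identity but not the ResourceVersion, which reconcile copies
+ // over separately before patching.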
+ + t.Run("GVK", func(t *testing.T) { + empty := &corev1.Secret{} + + extracted, err := corev1apply.ExtractSecret(empty, "") + assert.NilError(t, err) + + if assert.Check(t, extracted.APIVersion != nil) { + assert.Equal(t, *extracted.APIVersion, "v1") + } + if assert.Check(t, extracted.Kind != nil) { + assert.Equal(t, *extracted.Kind, "Secret") + } + }) + + t.Run("Name", func(t *testing.T) { + named := &corev1.Secret{} + named.Namespace, named.Name = "ns1", "s2" + + extracted, err := corev1apply.ExtractSecret(named, "") + assert.NilError(t, err) + + if assert.Check(t, extracted.Namespace != nil) { + assert.Equal(t, *extracted.Namespace, "ns1") + } + if assert.Check(t, extracted.Name != nil) { + assert.Equal(t, *extracted.Name, "s2") + } + }) + + t.Run("ResourceVersion", func(t *testing.T) { + versioned := &corev1.Secret{} + versioned.ResourceVersion = "asdf" + + extracted, err := corev1apply.ExtractSecret(versioned, "") + assert.NilError(t, err) + + // ResourceVersion is not copied from the original. + assert.Assert(t, extracted.ResourceVersion == nil) + }) +} + +func TestInstallationReconcile(t *testing.T) { + // Scenario: + // When there is no Secret and no Installation in memory, + // Then Reconcile should register with the API. + // + t.Run("FreshStart", func(t *testing.T) { + var reconciler *InstallationReconciler + var secret *corev1.Secret + + beforeEach := func() { + reconciler = new(InstallationReconciler) + secret = new(corev1.Secret) + self.Installation = Installation{} + } + + t.Run("ItRegisters", func(t *testing.T) { + beforeEach() + + // API double; spy on requests. + var requests []http.Request + { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests = append(requests, *r) + _ = json.NewEncoder(w).Encode(map[string]any{ + "id": "abc", "auth_object": map[string]any{"secret": "xyz"}, + }) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Backoff.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; spy on SSA patches. + var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + assert.Assert(t, next == 0) + + // It calls the API. + assert.Equal(t, len(requests), 1) + assert.Equal(t, requests[0].Method, "POST") + assert.Equal(t, requests[0].URL.Path, "/vendor/operator/installations") + + // It stores the result in memory. + assert.Equal(t, self.ID, "abc") + assert.Equal(t, self.AuthObject.Secret, "xyz") + + // It stores the result in Kubernetes. + assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + assert.NilError(t, yaml.Unmarshal([]byte(applies[0]), &decoded)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"id":"abc"`)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"secret":"xyz"`)) + }) + + t.Run("KubernetesError", func(t *testing.T) { + beforeEach() + + // API double; successful. 
+ { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _ = json.NewEncoder(w).Encode(map[string]any{ + "id": "123", "auth_object": map[string]any{"secret": "456"}, + }) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Backoff.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; failure. + expected := errors.New("boom") + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + return expected + }) + } + + ctx := context.Background() + _, err := reconciler.reconcile(ctx, secret) + assert.Equal(t, err, expected, "expected a Kubernetes error") + + // It stores the API result in memory. + assert.Equal(t, self.ID, "123") + assert.Equal(t, self.AuthObject.Secret, "456") + }) + }) + + // Scenario: + // When there is no Secret but an Installation exists in memory, + // Then Reconcile should store it in Kubernetes. + // + t.Run("LostSecret", func(t *testing.T) { + var reconciler *InstallationReconciler + var secret *corev1.Secret + + beforeEach := func(token []byte) { + reconciler = new(InstallationReconciler) + secret = new(corev1.Secret) + secret.Data = map[string][]byte{ + KeyBridgeToken: token, + } + self.Installation = Installation{ID: "asdf"} + } + + for _, tt := range []struct { + Name string + Token []byte + }{ + {Name: "NoToken", Token: nil}, + {Name: "BadToken", Token: []byte(`asdf`)}, + } { + t.Run(tt.Name, func(t *testing.T) { + beforeEach(tt.Token) + + // Kubernetes double; spy on SSA patches. + var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + assert.Assert(t, next == 0) + + assert.Equal(t, self.ID, "asdf", "expected no change to memory") + + // It stores the memory in Kubernetes. + assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + assert.NilError(t, yaml.Unmarshal([]byte(applies[0]), &decoded)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"id":"asdf"`)) + }) + } + + t.Run("KubernetesError", func(t *testing.T) { + beforeEach(nil) + + // Kubernetes double; failure. + expected := errors.New("boom") + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + return expected + }) + } + + ctx := context.Background() + _, err := reconciler.reconcile(ctx, secret) + assert.Equal(t, err, expected, "expected a Kubernetes error") + assert.Equal(t, self.ID, "asdf", "expected no change to memory") + }) + }) + + // Scenario: + // When there is a Secret but no Installation in memory, + // Then Reconcile should verify it in the API and store it in memory. 
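+ // Verification happens by creating a fresh AuthObject from the stored
+ // credentials; the refreshed secret then replaces the stored one both in
+ // memory and in the Kubernetes Secret.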
+ // + t.Run("Restart", func(t *testing.T) { + var reconciler *InstallationReconciler + var secret *corev1.Secret + + beforeEach := func() { + reconciler = new(InstallationReconciler) + secret = new(corev1.Secret) + secret.Data = map[string][]byte{ + KeyBridgeToken: []byte(`{ + "id":"xyz", "auth_object":{ + "secret":"abc", + "expires_at":"2020-10-28T05:06:07Z" + } + }`), + } + self.Installation = Installation{} + } + + t.Run("ItVerifies", func(t *testing.T) { + beforeEach() + + // API double; spy on requests. + var requests []http.Request + { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests = append(requests, *r) + _ = json.NewEncoder(w).Encode(map[string]any{"secret": "def"}) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Backoff.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; spy on SSA patches. + var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + assert.Assert(t, next == 0) + + assert.Equal(t, len(requests), 1) + assert.Equal(t, requests[0].Header.Get("Authorization"), "Bearer abc") + assert.Equal(t, requests[0].Method, "POST") + assert.Equal(t, requests[0].URL.Path, "/vendor/operator/auth-objects") + + // It stores the result in memory. + assert.Equal(t, self.ID, "xyz") + assert.Equal(t, self.AuthObject.Secret, "def") + + // It stores the memory in Kubernetes. + assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + assert.NilError(t, yaml.Unmarshal([]byte(applies[0]), &decoded)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"id":"xyz"`)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"secret":"def"`)) + }) + + t.Run("Expired", func(t *testing.T) { + beforeEach() + + // API double; authentication error. + { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Backoff.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; spy on SSA patches. + var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + assert.Assert(t, next == 0) + + assert.DeepEqual(t, self.Installation, Installation{}) + + // It archives the expired one. 
+ assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + assert.NilError(t, yaml.Unmarshal([]byte(applies[0]), &decoded)) + assert.Equal(t, len(decoded.Data["bridge-token"]), 0) + + archived := string(decoded.Data["bridge-token--2020-10-28"]) + assert.Assert(t, cmp.Contains(archived, `"id":"xyz"`)) + assert.Assert(t, cmp.Contains(archived, `"secret":"abc"`)) + }) + }) + + // Scenario: + // When there is an Installation in the Secret and in memory, + // Then Reconcile should refresh it periodically. + // + t.Run("Refresh", func(t *testing.T) { + var reconciler *InstallationReconciler + var secret *corev1.Secret + + beforeEach := func(timestamp []byte) { + reconciler = new(InstallationReconciler) + reconciler.Refresh = time.Minute + + secret = new(corev1.Secret) + secret.Data = map[string][]byte{ + KeyBridgeToken: []byte(`{"id":"ddd", "auth_object":{"secret":"eee"}}`), + KeyBridgeLocalTime: timestamp, + } + + self.Installation = Installation{ID: "ddd"} + } + + for _, tt := range []struct { + Name string + Timestamp []byte + }{ + {Name: "NoTimestamp", Timestamp: nil}, + {Name: "BadTimestamp", Timestamp: []byte(`asdf`)}, + {Name: "OldTimestamp", Timestamp: []byte(`"2020-10-10T20:20:20Z"`)}, + {Name: "FutureTimestamp", Timestamp: []byte(`"2030-10-10T20:20:20Z"`)}, + } { + t.Run(tt.Name, func(t *testing.T) { + beforeEach(tt.Timestamp) + + // API double; spy on requests. + var requests []http.Request + { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests = append(requests, *r) + _ = json.NewEncoder(w).Encode(map[string]any{"secret": "fresh"}) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Backoff.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; spy on SSA patches. + var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + assert.Assert(t, next == 0) + + assert.Equal(t, len(requests), 1) + assert.Equal(t, requests[0].Header.Get("Authorization"), "Bearer eee") + assert.Equal(t, requests[0].Method, "POST") + assert.Equal(t, requests[0].URL.Path, "/vendor/operator/auth-objects") + + // It stores the result in memory. + assert.Equal(t, self.ID, "ddd") + assert.Equal(t, self.AuthObject.Secret, "fresh") + + // It stores the memory in Kubernetes. + assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + assert.NilError(t, yaml.Unmarshal([]byte(applies[0]), &decoded)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"id":"ddd"`)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"secret":"fresh"`)) + }) + } + + t.Run("CurrentTimestamp", func(t *testing.T) { + current := time.Now().Add(-15 * time.Minute) + currentJSON, _ := current.UTC().MarshalJSON() + + beforeEach(currentJSON) + reconciler.Refresh = time.Hour + + // Any API calls would panic because no spies are configured here. 
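+ // reconciler.NewClient and reconciler.Writer are both nil in this subtest,
+ // so touching the Bridge API or Kubernetes would panic; passing proves
+ // reconcile returned before doing either.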
+ + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + + // The next reconcile is scheduled around (60 - 15 =) 45 minutes + // from now, plus or minus (60 * 10% =) 6 minutes of jitter. + assert.DeepEqual(t, next, 45*time.Minute, + cmpopt.DurationWithThreshold(6*time.Minute)) + }) + }) +} diff --git a/internal/bridge/naming.go b/internal/bridge/naming.go new file mode 100644 index 0000000000..cabe8e9cf6 --- /dev/null +++ b/internal/bridge/naming.go @@ -0,0 +1,10 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +const ( + KeyBridgeLocalTime = "bridge-local-time" + KeyBridgeToken = "bridge-token" +) diff --git a/internal/bridge/quantity.go b/internal/bridge/quantity.go new file mode 100644 index 0000000000..a948c6b4cf --- /dev/null +++ b/internal/bridge/quantity.go @@ -0,0 +1,44 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/resource" +) + +func FromCPU(n int64) *resource.Quantity { + // Assume the Bridge API returns numbers that can be parsed by the + // [resource] package. + if q, err := resource.ParseQuantity(fmt.Sprint(n)); err == nil { + return &q + } + + return resource.NewQuantity(0, resource.DecimalSI) +} + +// FromGibibytes returns n gibibytes as a [resource.Quantity]. +func FromGibibytes(n int64) *resource.Quantity { + // Assume the Bridge API returns numbers that can be parsed by the + // [resource] package. + if q, err := resource.ParseQuantity(fmt.Sprint(n) + "Gi"); err == nil { + return &q + } + + return resource.NewQuantity(0, resource.BinarySI) +} + +// ToGibibytes returns q rounded up to a non-negative gibibyte. +func ToGibibytes(q resource.Quantity) int64 { + v := q.Value() + + if v <= 0 { + return 0 + } + + // https://stackoverflow.com/a/2745086 + return 1 + ((v - 1) >> 30) +} diff --git a/internal/bridge/quantity_test.go b/internal/bridge/quantity_test.go new file mode 100644 index 0000000000..7cfebb4a86 --- /dev/null +++ b/internal/bridge/quantity_test.go @@ -0,0 +1,59 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "testing" + + "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestFromCPU(t *testing.T) { + zero := FromCPU(0) + assert.Assert(t, zero.IsZero()) + assert.Equal(t, zero.String(), "0") + + one := FromCPU(1) + assert.Equal(t, one.String(), "1") + + negative := FromCPU(-2) + assert.Equal(t, negative.String(), "-2") +} + +func TestFromGibibytes(t *testing.T) { + zero := FromGibibytes(0) + assert.Assert(t, zero.IsZero()) + assert.Equal(t, zero.String(), "0") + + one := FromGibibytes(1) + assert.Equal(t, one.String(), "1Gi") + + negative := FromGibibytes(-2) + assert.Equal(t, negative.String(), "-2Gi") +} + +func TestToGibibytes(t *testing.T) { + zero := resource.MustParse("0") + assert.Equal(t, ToGibibytes(zero), int64(0)) + + // Negative quantities become zero. + negative := resource.MustParse("-4G") + assert.Equal(t, ToGibibytes(negative), int64(0)) + + // Decimal quantities round up. + decimal := resource.MustParse("9000M") + assert.Equal(t, ToGibibytes(decimal), int64(9)) + + // Binary quantities round up. 
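+ // 8000Mi is 7.8125Gi and rounds up to 8Gi; 4096Mi is exactly 4Gi; 4097Mi
+ // rounds up to 5Gi.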
+ binary := resource.MustParse("8000Mi") + assert.Equal(t, ToGibibytes(binary), int64(8)) + + fourGi := resource.MustParse("4096Mi") + assert.Equal(t, ToGibibytes(fourGi), int64(4)) + + moreThanFourGi := resource.MustParse("4097Mi") + assert.Equal(t, ToGibibytes(moreThanFourGi), int64(5)) +} diff --git a/internal/config/config.go b/internal/config/config.go index 9eaea9938b..e3f9ced215 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package config @@ -30,10 +19,30 @@ func defaultFromEnv(value, key string) string { return value } +// FetchKeyCommand returns the fetch_key_cmd value stored in the encryption_key_command +// variable used to enable TDE. +func FetchKeyCommand(spec *v1beta1.PostgresClusterSpec) string { + if spec.Patroni != nil { + if spec.Patroni.DynamicConfiguration != nil { + configuration := spec.Patroni.DynamicConfiguration + if configuration != nil { + if postgresql, ok := configuration["postgresql"].(map[string]any); ok { + if parameters, ok := postgresql["parameters"].(map[string]any); ok { + if parameters["encryption_key_command"] != nil { + return fmt.Sprintf("%s", parameters["encryption_key_command"]) + } + } + } + } + } + } + return "" +} + // Red Hat Marketplace requires operators to use environment variables be used // for any image other than the operator itself. Those variables must start with // "RELATED_IMAGE_" so that OSBS can transform their tag values into digests -// for a "disconncted" OLM CSV. +// for a "disconnected" OLM CSV. // - https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/offline-enabled-operators // - https://osbs.readthedocs.io/en/latest/users.html#pullspec-locations @@ -45,6 +54,27 @@ func PGBackRestContainerImage(cluster *v1beta1.PostgresCluster) string { return defaultFromEnv(image, "RELATED_IMAGE_PGBACKREST") } +// PGAdminContainerImage returns the container image to use for pgAdmin. +func PGAdminContainerImage(cluster *v1beta1.PostgresCluster) string { + var image string + if cluster.Spec.UserInterface != nil && + cluster.Spec.UserInterface.PGAdmin != nil { + image = cluster.Spec.UserInterface.PGAdmin.Image + } + + return defaultFromEnv(image, "RELATED_IMAGE_PGADMIN") +} + +// StandalonePGAdminContainerImage returns the container image to use for pgAdmin. +func StandalonePGAdminContainerImage(pgadmin *v1beta1.PGAdmin) string { + var image string + if pgadmin.Spec.Image != nil { + image = *pgadmin.Spec.Image + } + + return defaultFromEnv(image, "RELATED_IMAGE_STANDALONE_PGADMIN") +} + // PGBouncerContainerImage returns the container image to use for pgBouncer. 
func PGBouncerContainerImage(cluster *v1beta1.PostgresCluster) string { var image string @@ -80,3 +110,50 @@ func PostgresContainerImage(cluster *v1beta1.PostgresCluster) string { return defaultFromEnv(image, key) } + +// PGONamespace returns the namespace where the PGO is running, +// based on the env var from the DownwardAPI +// If no env var is found, returns "" +func PGONamespace() string { + return os.Getenv("PGO_NAMESPACE") +} + +// VerifyImageValues checks that all container images required by the +// spec are defined. If any are undefined, a list is returned in an error. +func VerifyImageValues(cluster *v1beta1.PostgresCluster) error { + + var images []string + + if PGBackRestContainerImage(cluster) == "" { + images = append(images, "crunchy-pgbackrest") + } + if PGAdminContainerImage(cluster) == "" && + cluster.Spec.UserInterface != nil && + cluster.Spec.UserInterface.PGAdmin != nil { + images = append(images, "crunchy-pgadmin4") + } + if PGBouncerContainerImage(cluster) == "" && + cluster.Spec.Proxy != nil && + cluster.Spec.Proxy.PGBouncer != nil { + images = append(images, "crunchy-pgbouncer") + } + if PGExporterContainerImage(cluster) == "" && + cluster.Spec.Monitoring != nil && + cluster.Spec.Monitoring.PGMonitor != nil && + cluster.Spec.Monitoring.PGMonitor.Exporter != nil { + images = append(images, "crunchy-postgres-exporter") + } + if PostgresContainerImage(cluster) == "" { + if cluster.Spec.PostGISVersion != "" { + images = append(images, "crunchy-postgres-gis") + } else { + images = append(images, "crunchy-postgres") + } + } + + if len(images) > 0 { + return fmt.Errorf("Missing image(s): %s", images) + } + + return nil +} diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 40b14a5e44..7b8ca2f863 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package config @@ -25,40 +14,101 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -func saveEnv(t testing.TB, key string) { - t.Helper() - previous, ok := os.LookupEnv(key) - t.Cleanup(func() { - if ok { - os.Setenv(key, previous) - } else { - os.Unsetenv(key) - } - }) -} +func TestFetchKeyCommand(t *testing.T) { + + spec1 := v1beta1.PostgresClusterSpec{} + assert.Assert(t, FetchKeyCommand(&spec1) == "") + + spec2 := v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{}, + } + assert.Assert(t, FetchKeyCommand(&spec2) == "") + + spec3 := v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{}, + }, + } + assert.Assert(t, FetchKeyCommand(&spec3) == "") + + spec4 := v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{}, + }, + }, + } + assert.Assert(t, FetchKeyCommand(&spec4) == "") + + spec5 := v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{}, + }, + }, + }, + } + assert.Assert(t, FetchKeyCommand(&spec5) == "") + + spec6 := v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "encryption_key_command": "", + }, + }, + }, + }, + } + assert.Assert(t, FetchKeyCommand(&spec6) == "") + + spec7 := v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "encryption_key_command": "echo mykey", + }, + }, + }, + }, + } + assert.Assert(t, FetchKeyCommand(&spec7) == "echo mykey") -func setEnv(t testing.TB, key, value string) { - t.Helper() - saveEnv(t, key) - assert.NilError(t, os.Setenv(key, value)) } -func unsetEnv(t testing.TB, key string) { - t.Helper() - saveEnv(t, key) - assert.NilError(t, os.Unsetenv(key)) +func TestPGAdminContainerImage(t *testing.T) { + cluster := &v1beta1.PostgresCluster{} + + t.Setenv("RELATED_IMAGE_PGADMIN", "") + os.Unsetenv("RELATED_IMAGE_PGADMIN") + assert.Equal(t, PGAdminContainerImage(cluster), "") + + t.Setenv("RELATED_IMAGE_PGADMIN", "") + assert.Equal(t, PGAdminContainerImage(cluster), "") + + t.Setenv("RELATED_IMAGE_PGADMIN", "env-var-pgadmin") + assert.Equal(t, PGAdminContainerImage(cluster), "env-var-pgadmin") + + assert.NilError(t, yaml.Unmarshal([]byte(`{ + userInterface: { pgAdmin: { image: spec-image } }, + }`), &cluster.Spec)) + assert.Equal(t, PGAdminContainerImage(cluster), "spec-image") } func TestPGBackRestContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} - unsetEnv(t, "RELATED_IMAGE_PGBACKREST") + t.Setenv("RELATED_IMAGE_PGBACKREST", "") + os.Unsetenv("RELATED_IMAGE_PGBACKREST") assert.Equal(t, PGBackRestContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGBACKREST", "") + t.Setenv("RELATED_IMAGE_PGBACKREST", "") assert.Equal(t, PGBackRestContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGBACKREST", "env-var-pgbackrest") + t.Setenv("RELATED_IMAGE_PGBACKREST", "env-var-pgbackrest") assert.Equal(t, PGBackRestContainerImage(cluster), "env-var-pgbackrest") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -70,13 +120,14 @@ func TestPGBackRestContainerImage(t *testing.T) { func TestPGBouncerContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} 
- unsetEnv(t, "RELATED_IMAGE_PGBOUNCER") + t.Setenv("RELATED_IMAGE_PGBOUNCER", "") + os.Unsetenv("RELATED_IMAGE_PGBOUNCER") assert.Equal(t, PGBouncerContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGBOUNCER", "") + t.Setenv("RELATED_IMAGE_PGBOUNCER", "") assert.Equal(t, PGBouncerContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGBOUNCER", "env-var-pgbouncer") + t.Setenv("RELATED_IMAGE_PGBOUNCER", "env-var-pgbouncer") assert.Equal(t, PGBouncerContainerImage(cluster), "env-var-pgbouncer") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -88,13 +139,14 @@ func TestPGBouncerContainerImage(t *testing.T) { func TestPGExporterContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} - unsetEnv(t, "RELATED_IMAGE_PGEXPORTER") + t.Setenv("RELATED_IMAGE_PGEXPORTER", "") + os.Unsetenv("RELATED_IMAGE_PGEXPORTER") assert.Equal(t, PGExporterContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGEXPORTER", "") + t.Setenv("RELATED_IMAGE_PGEXPORTER", "") assert.Equal(t, PGExporterContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGEXPORTER", "env-var-pgexporter") + t.Setenv("RELATED_IMAGE_PGEXPORTER", "env-var-pgexporter") assert.Equal(t, PGExporterContainerImage(cluster), "env-var-pgexporter") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -103,17 +155,37 @@ func TestPGExporterContainerImage(t *testing.T) { assert.Equal(t, PGExporterContainerImage(cluster), "spec-image") } +func TestStandalonePGAdminContainerImage(t *testing.T) { + pgadmin := &v1beta1.PGAdmin{} + + t.Setenv("RELATED_IMAGE_STANDALONE_PGADMIN", "") + os.Unsetenv("RELATED_IMAGE_STANDALONE_PGADMIN") + assert.Equal(t, StandalonePGAdminContainerImage(pgadmin), "") + + t.Setenv("RELATED_IMAGE_STANDALONE_PGADMIN", "") + assert.Equal(t, StandalonePGAdminContainerImage(pgadmin), "") + + t.Setenv("RELATED_IMAGE_STANDALONE_PGADMIN", "env-var-pgadmin") + assert.Equal(t, StandalonePGAdminContainerImage(pgadmin), "env-var-pgadmin") + + assert.NilError(t, yaml.Unmarshal([]byte(`{ + image: spec-image + }`), &pgadmin.Spec)) + assert.Equal(t, StandalonePGAdminContainerImage(pgadmin), "spec-image") +} + func TestPostgresContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} cluster.Spec.PostgresVersion = 12 - unsetEnv(t, "RELATED_IMAGE_POSTGRES_12") + t.Setenv("RELATED_IMAGE_POSTGRES_12", "") + os.Unsetenv("RELATED_IMAGE_POSTGRES_12") assert.Equal(t, PostgresContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_POSTGRES_12", "") + t.Setenv("RELATED_IMAGE_POSTGRES_12", "") assert.Equal(t, PostgresContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_POSTGRES_12", "env-var-postgres") + t.Setenv("RELATED_IMAGE_POSTGRES_12", "env-var-postgres") assert.Equal(t, PostgresContainerImage(cluster), "env-var-postgres") cluster.Spec.Image = "spec-image" @@ -121,9 +193,64 @@ func TestPostgresContainerImage(t *testing.T) { cluster.Spec.Image = "" cluster.Spec.PostGISVersion = "3.0" - setEnv(t, "RELATED_IMAGE_POSTGRES_12_GIS_3.0", "env-var-postgis") + t.Setenv("RELATED_IMAGE_POSTGRES_12_GIS_3.0", "env-var-postgis") assert.Equal(t, PostgresContainerImage(cluster), "env-var-postgis") cluster.Spec.Image = "spec-image" assert.Equal(t, PostgresContainerImage(cluster), "spec-image") } + +func TestVerifyImageValues(t *testing.T) { + cluster := &v1beta1.PostgresCluster{} + + verifyImageCheck := func(t *testing.T, envVar, errString string, cluster *v1beta1.PostgresCluster) { + + t.Setenv(envVar, "") + os.Unsetenv(envVar) + err := VerifyImageValues(cluster) + assert.ErrorContains(t, err, errString) + } + + t.Run("crunchy-postgres", 
func(t *testing.T) { + cluster.Spec.PostgresVersion = 14 + verifyImageCheck(t, "RELATED_IMAGE_POSTGRES_14", "crunchy-postgres", cluster) + }) + + t.Run("crunchy-postgres-gis", func(t *testing.T) { + cluster.Spec.PostGISVersion = "3.3" + verifyImageCheck(t, "RELATED_IMAGE_POSTGRES_14_GIS_3.3", "crunchy-postgres-gis", cluster) + }) + + t.Run("crunchy-pgbackrest", func(t *testing.T) { + verifyImageCheck(t, "RELATED_IMAGE_PGBACKREST", "crunchy-pgbackrest", cluster) + }) + + t.Run("crunchy-pgbouncer", func(t *testing.T) { + cluster.Spec.Proxy = new(v1beta1.PostgresProxySpec) + cluster.Spec.Proxy.PGBouncer = new(v1beta1.PGBouncerPodSpec) + verifyImageCheck(t, "RELATED_IMAGE_PGBOUNCER", "crunchy-pgbouncer", cluster) + }) + + t.Run("crunchy-pgadmin4", func(t *testing.T) { + cluster.Spec.UserInterface = new(v1beta1.UserInterfaceSpec) + cluster.Spec.UserInterface.PGAdmin = new(v1beta1.PGAdminPodSpec) + verifyImageCheck(t, "RELATED_IMAGE_PGADMIN", "crunchy-pgadmin4", cluster) + }) + + t.Run("crunchy-postgres-exporter", func(t *testing.T) { + cluster.Spec.Monitoring = new(v1beta1.MonitoringSpec) + cluster.Spec.Monitoring.PGMonitor = new(v1beta1.PGMonitorSpec) + cluster.Spec.Monitoring.PGMonitor.Exporter = new(v1beta1.ExporterSpec) + verifyImageCheck(t, "RELATED_IMAGE_PGEXPORTER", "crunchy-postgres-exporter", cluster) + }) + + t.Run("multiple images", func(t *testing.T) { + err := VerifyImageValues(cluster) + assert.ErrorContains(t, err, "crunchy-postgres-gis") + assert.ErrorContains(t, err, "crunchy-pgbackrest") + assert.ErrorContains(t, err, "crunchy-pgbouncer") + assert.ErrorContains(t, err, "crunchy-pgadmin4") + assert.ErrorContains(t, err, "crunchy-postgres-exporter") + }) + +} diff --git a/internal/controller/pgupgrade/apply.go b/internal/controller/pgupgrade/apply.go new file mode 100644 index 0000000000..71cf65cd4f --- /dev/null +++ b/internal/controller/pgupgrade/apply.go @@ -0,0 +1,43 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgupgrade + +import ( + "context" + "reflect" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// patch sends patch to object's endpoint in the Kubernetes API and updates +// object with any returned content. The fieldManager is set to r.Owner, but +// can be overridden in options. +// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers +func (r *PGUpgradeReconciler) patch( + ctx context.Context, object client.Object, + patch client.Patch, options ...client.PatchOption, +) error { + options = append([]client.PatchOption{r.Owner}, options...) + return r.Client.Patch(ctx, object, patch, options...) +} + +// apply sends an apply patch to object's endpoint in the Kubernetes API and +// updates object with any returned content. The fieldManager is set to +// r.Owner and the force parameter is true. +// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers +// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts +func (r *PGUpgradeReconciler) apply(ctx context.Context, object client.Object) error { + // Generate an apply-patch by comparing the object to its zero value. + zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() + data, err := client.MergeFrom(zero.(client.Object)).Data(object) + apply := client.RawPatch(client.Apply.Type(), data) + + // Send the apply-patch with force=true. 
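+ // client.ForceOwnership resolves any field manager conflicts in favor of
+ // r.Owner instead of returning a conflict error.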
+ if err == nil { + err = r.patch(ctx, object, apply, client.ForceOwnership) + } + + return err +} diff --git a/internal/controller/pgupgrade/jobs.go b/internal/controller/pgupgrade/jobs.go new file mode 100644 index 0000000000..a1722dfc12 --- /dev/null +++ b/internal/controller/pgupgrade/jobs.go @@ -0,0 +1,344 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgupgrade + +import ( + "context" + "fmt" + "strings" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// Upgrade job + +// pgUpgradeJob returns the ObjectMeta for the pg_upgrade Job utilized to +// upgrade from one major PostgreSQL version to another +func pgUpgradeJob(upgrade *v1beta1.PGUpgrade) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: upgrade.Namespace, + Name: upgrade.Name + "-pgdata", + } +} + +// upgradeCommand returns an entrypoint that prepares the filesystem for +// and performs a PostgreSQL major version upgrade using pg_upgrade. +func upgradeCommand(upgrade *v1beta1.PGUpgrade, fetchKeyCommand string) []string { + oldVersion := fmt.Sprint(upgrade.Spec.FromPostgresVersion) + newVersion := fmt.Sprint(upgrade.Spec.ToPostgresVersion) + + // if the fetch key command is set for TDE, provide the value during initialization + initdb := `/usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}"` + if fetchKeyCommand != "" { + initdb += ` --encryption-key-command "` + fetchKeyCommand + `"` + } + + args := []string{oldVersion, newVersion} + script := strings.Join([]string{ + `declare -r data_volume='/pgdata' old_version="$1" new_version="$2"`, + `printf 'Performing PostgreSQL upgrade from version "%s" to "%s" ...\n\n' "$@"`, + + // Note: Rather than import the nss_wrapper init container, as we do in + // the main postgres-operator, this job does the required nss_wrapper + // settings here. + + // Create a copy of the system group definitions, but remove the "postgres" + // group or any group with the current GID. Replace them with our own that + // has the current GID. + `gid=$(id -G); NSS_WRAPPER_GROUP=$(mktemp)`, + `(sed "/^postgres:x:/ d; /^[^:]*:x:${gid%% *}:/ d" /etc/group`, + `echo "postgres:x:${gid%% *}:") > "${NSS_WRAPPER_GROUP}"`, + + // Create a copy of the system user definitions, but remove the "postgres" + // user or any user with the current UID. Replace them with our own that + // has the current UID and GID. + `uid=$(id -u); NSS_WRAPPER_PASSWD=$(mktemp)`, + `(sed "/^postgres:x:/ d; /^[^:]*:x:${uid}:/ d" /etc/passwd`, + `echo "postgres:x:${uid}:${gid%% *}::${data_volume}:") > "${NSS_WRAPPER_PASSWD}"`, + + // Enable nss_wrapper so the current UID and GID resolve to "postgres". + // - https://cwrap.org/nss_wrapper.html + `export LD_PRELOAD='libnss_wrapper.so' NSS_WRAPPER_GROUP NSS_WRAPPER_PASSWD`, + + // Below is the pg_upgrade script used to upgrade a PostgresCluster from + // one major version to another. Additional information concerning the + // steps used and command flag specifics can be found in the documentation: + // - https://www.postgresql.org/docs/current/pgupgrade.html + + // To begin, we first move to the mounted /pgdata directory and create a + // new version directory which is then initialized with the initdb command. 
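+ // (`initdb -k` enables data page checksums in the new data directory; the
+ // TDE --encryption-key-command flag is appended above only when a fetch key
+ // command is configured for the cluster.)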
+ `cd /pgdata || exit`, + `echo -e "Step 1: Making new pgdata directory...\n"`, + `mkdir /pgdata/pg"${new_version}"`, + `echo -e "Step 2: Initializing new pgdata directory...\n"`, + initdb, + + // Before running the upgrade check, which ensures the clusters are compatible, + // proper permissions have to be set on the old pgdata directory and the + // preload library settings must be copied over. + `echo -e "\nStep 3: Setting the expected permissions on the old pgdata directory...\n"`, + `chmod 700 /pgdata/pg"${old_version}"`, + `echo -e "Step 4: Copying shared_preload_libraries setting to new postgresql.conf file...\n"`, + `echo "shared_preload_libraries = '$(/usr/pgsql-"""${old_version}"""/bin/postgres -D \`, + `/pgdata/pg"""${old_version}""" -C shared_preload_libraries)'" >> /pgdata/pg"${new_version}"/postgresql.conf`, + + // Before the actual upgrade is run, we will run the upgrade --check to + // verify everything before actually changing any data. + `echo -e "Step 5: Running pg_upgrade check...\n"`, + `time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \`, + `--new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}"\`, + ` --new-datadir /pgdata/pg"${new_version}" --link --check`, + + // Assuming the check completes successfully, the pg_upgrade command will + // be run that actually prepares the upgraded pgdata directory. + `echo -e "\nStep 6: Running pg_upgrade...\n"`, + `time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \`, + `--new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}" \`, + `--new-datadir /pgdata/pg"${new_version}" --link`, + + // Since we have cleared the Patroni cluster step by removing the EndPoints, we copy patroni.dynamic.json + // from the old data dir to help retain PostgreSQL parameters you had set before. + // - https://patroni.readthedocs.io/en/latest/existing_data.html#major-upgrade-of-postgresql-version + `echo -e "\nStep 7: Copying patroni.dynamic.json...\n"`, + `cp /pgdata/pg"${old_version}"/patroni.dynamic.json /pgdata/pg"${new_version}"`, + + `echo -e "\npg_upgrade Job Complete!"`, + }, "\n") + + return append([]string{"bash", "-ceu", "--", script, "upgrade"}, args...) +} + +// generateUpgradeJob returns a Job that can upgrade the PostgreSQL data +// directory of the startup instance. +func (r *PGUpgradeReconciler) generateUpgradeJob( + _ context.Context, upgrade *v1beta1.PGUpgrade, + startup *appsv1.StatefulSet, fetchKeyCommand string, +) *batchv1.Job { + job := &batchv1.Job{} + job.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) + + job.Namespace = upgrade.Namespace + job.Name = pgUpgradeJob(upgrade).Name + + job.Annotations = upgrade.Spec.Metadata.GetAnnotationsOrNil() + job.Labels = Merge(upgrade.Spec.Metadata.GetLabelsOrNil(), + commonLabels(pgUpgrade, upgrade), //FIXME role pgupgrade + map[string]string{ + LabelVersion: fmt.Sprint(upgrade.Spec.ToPostgresVersion), + }) + + // Find the database container. + var database *corev1.Container + for i := range startup.Spec.Template.Spec.Containers { + container := startup.Spec.Template.Spec.Containers[i] + if container.Name == ContainerDatabase { + database = &container + } + } + + // Copy the pod template from the startup instance StatefulSet. This includes + // the service account, volumes, DNS policies, and scheduling constraints. + startup.Spec.Template.DeepCopyInto(&job.Spec.Template) + + // Use the same labels and annotations as the job. 
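+ // Replacing ObjectMeta wholesale also drops any labels and annotations that
+ // were copied from the startup instance's pod template just above.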
+ job.Spec.Template.ObjectMeta = metav1.ObjectMeta{ + Annotations: job.Annotations, + Labels: job.Labels, + } + + // Use the image pull secrets specified for the upgrade image. + job.Spec.Template.Spec.ImagePullSecrets = upgrade.Spec.ImagePullSecrets + + // Attempt the upgrade exactly once. + job.Spec.BackoffLimit = initialize.Int32(0) + job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever + + // Replace all containers with one that does the upgrade. + job.Spec.Template.Spec.EphemeralContainers = nil + job.Spec.Template.Spec.InitContainers = nil + job.Spec.Template.Spec.Containers = []corev1.Container{{ + // Copy volume mounts and the security context needed to access them + // from the database container. There is a downward API volume that + // refers back to the container by name, so use that same name here. + Name: database.Name, + SecurityContext: database.SecurityContext, + VolumeMounts: database.VolumeMounts, + + // Use our upgrade command and the specified image and resources. + Command: upgradeCommand(upgrade, fetchKeyCommand), + Image: pgUpgradeContainerImage(upgrade), + ImagePullPolicy: upgrade.Spec.ImagePullPolicy, + Resources: upgrade.Spec.Resources, + }} + + // The following will set these fields to null if not set in the spec + job.Spec.Template.Spec.Affinity = upgrade.Spec.Affinity + job.Spec.Template.Spec.PriorityClassName = + initialize.FromPointer(upgrade.Spec.PriorityClassName) + job.Spec.Template.Spec.Tolerations = upgrade.Spec.Tolerations + + r.setControllerReference(upgrade, job) + return job +} + +// Remove data job + +// removeDataCommand returns an entrypoint that removes certain directories. +// We currently target the `pgdata/pg{old_version}` and `pgdata/pg{old_version}_wal` +// directories for removal. +func removeDataCommand(upgrade *v1beta1.PGUpgrade) []string { + oldVersion := fmt.Sprint(upgrade.Spec.FromPostgresVersion) + + // Before removing the directories (both data and wal), we check that + // the directory is not in use by running `pg_controldata` and making sure + // the server state is "shut down in recovery" + // TODO(benjaminjb): pg_controldata seems pretty stable, but might want to + // experiment with a few more versions. + args := []string{oldVersion} + script := strings.Join([]string{ + `declare -r old_version="$1"`, + `printf 'Removing PostgreSQL data dir for pg%s...\n\n' "$@"`, + `echo -e "Checking the directory exists and isn't being used...\n"`, + `cd /pgdata || exit`, + // The string `shut down in recovery` is the dbstate that postgres sets from + // at least version 10 to 14 when a replica has been shut down. + // - https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/bin/pg_controldata/pg_controldata.c;h=f911f98d946d83f1191abf35239d9b4455c5f52a;hb=HEAD#l59 + // Note: `pg_controldata` is actually used by `pg_upgrade` before upgrading + // to make sure that the server in question is shut down as a primary; + // that aligns with our use here, where we're making sure that the server in question + // was shut down as a replica. 
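+ // For reference, the matched line in `pg_controldata` output looks roughly like:
+ //
+ //	Database cluster state:               shut down in recovery
+ //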
+ // - https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/bin/pg_upgrade/controldata.c;h=41b8f69b8cbe4f40e6098ad84c2e8e987e24edaf;hb=HEAD#l122 + `if [ "$(/usr/pgsql-"${old_version}"/bin/pg_controldata /pgdata/pg"${old_version}" | grep -c "shut down in recovery")" -ne 1 ]; then echo -e "Directory in use, cannot remove..."; exit 1; fi`, + `echo -e "Removing old pgdata directory...\n"`, + // When deleting the wal directory, use `realpath` to resolve the symlink from + // the pgdata directory. This is necessary because the wal directory can be + // mounted at different places depending on if an external wal PVC is used, + // i.e. `/pgdata/pg14_wal` vs `/pgwal/pg14_wal` + `rm -rf /pgdata/pg"${old_version}" "$(realpath /pgdata/pg${old_version}/pg_wal)"`, + `echo -e "Remove Data Job Complete!"`, + }, "\n") + + return append([]string{"bash", "-ceu", "--", script, "remove"}, args...) +} + +// generateRemoveDataJob returns a Job that can remove the data +// on the given replica StatefulSet +func (r *PGUpgradeReconciler) generateRemoveDataJob( + _ context.Context, upgrade *v1beta1.PGUpgrade, sts *appsv1.StatefulSet, +) *batchv1.Job { + job := &batchv1.Job{} + job.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) + + job.Namespace = upgrade.Namespace + job.Name = upgrade.Name + "-" + sts.Name + + job.Annotations = upgrade.Spec.Metadata.GetAnnotationsOrNil() + job.Labels = labels.Merge(upgrade.Spec.Metadata.GetLabelsOrNil(), + commonLabels(removeData, upgrade)) //FIXME role removedata + + // Find the database container. + var database *corev1.Container + for i := range sts.Spec.Template.Spec.Containers { + container := sts.Spec.Template.Spec.Containers[i] + if container.Name == ContainerDatabase { + database = &container + } + } + + // Copy the pod template from the sts instance StatefulSet. This includes + // the service account, volumes, DNS policies, and scheduling constraints. + sts.Spec.Template.DeepCopyInto(&job.Spec.Template) + + // Use the same labels and annotations as the job. + job.Spec.Template.ObjectMeta = metav1.ObjectMeta{ + Annotations: job.Annotations, + Labels: job.Labels, + } + + // Use the image pull secrets specified for the upgrade image. + job.Spec.Template.Spec.ImagePullSecrets = upgrade.Spec.ImagePullSecrets + + // Attempt the removal exactly once. + job.Spec.BackoffLimit = initialize.Int32(0) + job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever + + // Replace all containers with one that removes the data. + job.Spec.Template.Spec.EphemeralContainers = nil + job.Spec.Template.Spec.InitContainers = nil + job.Spec.Template.Spec.Containers = []corev1.Container{{ + // Copy volume mounts and the security context needed to access them + // from the database container. There is a downward API volume that + // refers back to the container by name, so use that same name here. + // We are using a PG image in order to check that the PG server is down. + Name: database.Name, + SecurityContext: database.SecurityContext, + VolumeMounts: database.VolumeMounts, + + // Use our remove command and the specified resources. 
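+ // The image resolves from the PGUpgrade spec first and falls back to the
+ // RELATED_IMAGE_PGUPGRADE environment variable.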
+ Command: removeDataCommand(upgrade), + Image: pgUpgradeContainerImage(upgrade), + ImagePullPolicy: upgrade.Spec.ImagePullPolicy, + Resources: upgrade.Spec.Resources, + }} + + // The following will set these fields to null if not set in the spec + job.Spec.Template.Spec.Affinity = upgrade.Spec.Affinity + job.Spec.Template.Spec.PriorityClassName = + initialize.FromPointer(upgrade.Spec.PriorityClassName) + job.Spec.Template.Spec.Tolerations = upgrade.Spec.Tolerations + + r.setControllerReference(upgrade, job) + return job +} + +// Util functions + +// pgUpgradeContainerImage returns the container image to use for pg_upgrade. +func pgUpgradeContainerImage(upgrade *v1beta1.PGUpgrade) string { + var image string + if upgrade.Spec.Image != nil { + image = *upgrade.Spec.Image + } + return defaultFromEnv(image, "RELATED_IMAGE_PGUPGRADE") +} + +// verifyUpgradeImageValue checks that the upgrade container image required by the +// spec is defined. If it is undefined, an error is returned. +func verifyUpgradeImageValue(upgrade *v1beta1.PGUpgrade) error { + if pgUpgradeContainerImage(upgrade) == "" { + return fmt.Errorf("Missing crunchy-upgrade image") + } + return nil +} + +// jobFailed returns "true" if the Job provided has failed. Otherwise it returns "false". +func jobFailed(job *batchv1.Job) bool { + conditions := job.Status.Conditions + for i := range conditions { + if conditions[i].Type == batchv1.JobFailed { + return (conditions[i].Status == corev1.ConditionTrue) + } + } + return false +} + +// jobCompleted returns "true" if the Job provided completed successfully. Otherwise it returns +// "false". +func jobCompleted(job *batchv1.Job) bool { + conditions := job.Status.Conditions + for i := range conditions { + if conditions[i].Type == batchv1.JobComplete { + return (conditions[i].Status == corev1.ConditionTrue) + } + } + return false +} diff --git a/internal/controller/pgupgrade/jobs_test.go b/internal/controller/pgupgrade/jobs_test.go new file mode 100644 index 0000000000..8dfc4731a2 --- /dev/null +++ b/internal/controller/pgupgrade/jobs_test.go @@ -0,0 +1,283 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package pgupgrade + +import ( + "context" + "os" + "strings" + "testing" + + "gotest.tools/v3/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "sigs.k8s.io/yaml" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestGenerateUpgradeJob(t *testing.T) { + ctx := context.Background() + reconciler := &PGUpgradeReconciler{} + + upgrade := &v1beta1.PGUpgrade{} + upgrade.Namespace = "ns1" + upgrade.Name = "pgu2" + upgrade.UID = "uid3" + upgrade.Spec.Image = initialize.Pointer("img4") + upgrade.Spec.PostgresClusterName = "pg5" + upgrade.Spec.FromPostgresVersion = 19 + upgrade.Spec.ToPostgresVersion = 25 + upgrade.Spec.Resources.Requests = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3.14"), + } + + startup := &appsv1.StatefulSet{} + startup.Spec.Template.Spec = corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: ContainerDatabase, + + SecurityContext: &corev1.SecurityContext{Privileged: new(bool)}, + VolumeMounts: []corev1.VolumeMount{ + {Name: "vm1", MountPath: "/mnt/some/such"}, + }, + }}, + Volumes: []corev1.Volume{ + { + Name: "vol2", + VolumeSource: corev1.VolumeSource{ + HostPath: new(corev1.HostPathVolumeSource), + }, + }, + }, + } + + job := reconciler.generateUpgradeJob(ctx, upgrade, startup, "") + assert.Assert(t, cmp.MarshalMatches(job, ` +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: null + labels: + postgres-operator.crunchydata.com/cluster: pg5 + postgres-operator.crunchydata.com/pgupgrade: pgu2 + postgres-operator.crunchydata.com/role: pgupgrade + postgres-operator.crunchydata.com/version: "25" + name: pgu2-pgdata + namespace: ns1 + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PGUpgrade + name: pgu2 + uid: uid3 +spec: + backoffLimit: 0 + template: + metadata: + creationTimestamp: null + labels: + postgres-operator.crunchydata.com/cluster: pg5 + postgres-operator.crunchydata.com/pgupgrade: pgu2 + postgres-operator.crunchydata.com/role: pgupgrade + postgres-operator.crunchydata.com/version: "25" + spec: + containers: + - command: + - bash + - -ceu + - -- + - |- + declare -r data_volume='/pgdata' old_version="$1" new_version="$2" + printf 'Performing PostgreSQL upgrade from version "%s" to "%s" ...\n\n' "$@" + gid=$(id -G); NSS_WRAPPER_GROUP=$(mktemp) + (sed "/^postgres:x:/ d; /^[^:]*:x:${gid%% *}:/ d" /etc/group + echo "postgres:x:${gid%% *}:") > "${NSS_WRAPPER_GROUP}" + uid=$(id -u); NSS_WRAPPER_PASSWD=$(mktemp) + (sed "/^postgres:x:/ d; /^[^:]*:x:${uid}:/ d" /etc/passwd + echo "postgres:x:${uid}:${gid%% *}::${data_volume}:") > "${NSS_WRAPPER_PASSWD}" + export LD_PRELOAD='libnss_wrapper.so' NSS_WRAPPER_GROUP NSS_WRAPPER_PASSWD + cd /pgdata || exit + echo -e "Step 1: Making new pgdata directory...\n" + mkdir /pgdata/pg"${new_version}" + echo -e "Step 2: Initializing new pgdata directory...\n" + /usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}" + echo -e "\nStep 3: Setting the expected permissions on the old pgdata directory...\n" + chmod 700 /pgdata/pg"${old_version}" + echo -e "Step 4: Copying shared_preload_libraries setting to new postgresql.conf file...\n" + echo "shared_preload_libraries = '$(/usr/pgsql-"""${old_version}"""/bin/postgres -D \ 
+ /pgdata/pg"""${old_version}""" -C shared_preload_libraries)'" >> /pgdata/pg"${new_version}"/postgresql.conf + echo -e "Step 5: Running pg_upgrade check...\n" + time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \ + --new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}"\ + --new-datadir /pgdata/pg"${new_version}" --link --check + echo -e "\nStep 6: Running pg_upgrade...\n" + time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \ + --new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}" \ + --new-datadir /pgdata/pg"${new_version}" --link + echo -e "\nStep 7: Copying patroni.dynamic.json...\n" + cp /pgdata/pg"${old_version}"/patroni.dynamic.json /pgdata/pg"${new_version}" + echo -e "\npg_upgrade Job Complete!" + - upgrade + - "19" + - "25" + image: img4 + name: database + resources: + requests: + cpu: 3140m + securityContext: + privileged: false + volumeMounts: + - mountPath: /mnt/some/such + name: vm1 + restartPolicy: Never + volumes: + - hostPath: + path: "" + name: vol2 +status: {} + `)) + + tdeJob := reconciler.generateUpgradeJob(ctx, upgrade, startup, "echo testKey") + b, _ := yaml.Marshal(tdeJob) + assert.Assert(t, strings.Contains(string(b), + `/usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}" --encryption-key-command "echo testKey"`)) +} + +func TestGenerateRemoveDataJob(t *testing.T) { + ctx := context.Background() + reconciler := &PGUpgradeReconciler{} + + upgrade := &v1beta1.PGUpgrade{} + upgrade.Namespace = "ns1" + upgrade.Name = "pgu2" + upgrade.UID = "uid3" + upgrade.Spec.Image = initialize.Pointer("img4") + upgrade.Spec.PostgresClusterName = "pg5" + upgrade.Spec.FromPostgresVersion = 19 + upgrade.Spec.ToPostgresVersion = 25 + upgrade.Spec.Resources.Requests = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3.14"), + } + + sts := &appsv1.StatefulSet{} + sts.Name = "sts" + sts.Spec.Template.Spec = corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: ContainerDatabase, + Image: "img3", + SecurityContext: &corev1.SecurityContext{Privileged: new(bool)}, + VolumeMounts: []corev1.VolumeMount{ + {Name: "vm1", MountPath: "/mnt/some/such"}, + }, + }}, + Volumes: []corev1.Volume{ + { + Name: "vol2", + VolumeSource: corev1.VolumeSource{ + HostPath: new(corev1.HostPathVolumeSource), + }, + }, + }, + } + + job := reconciler.generateRemoveDataJob(ctx, upgrade, sts) + assert.Assert(t, cmp.MarshalMatches(job, ` +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: null + labels: + postgres-operator.crunchydata.com/cluster: pg5 + postgres-operator.crunchydata.com/pgupgrade: pgu2 + postgres-operator.crunchydata.com/role: removedata + name: pgu2-sts + namespace: ns1 + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PGUpgrade + name: pgu2 + uid: uid3 +spec: + backoffLimit: 0 + template: + metadata: + creationTimestamp: null + labels: + postgres-operator.crunchydata.com/cluster: pg5 + postgres-operator.crunchydata.com/pgupgrade: pgu2 + postgres-operator.crunchydata.com/role: removedata + spec: + containers: + - command: + - bash + - -ceu + - -- + - |- + declare -r old_version="$1" + printf 'Removing PostgreSQL data dir for pg%s...\n\n' "$@" + echo -e "Checking the directory exists and isn't being used...\n" + cd /pgdata || exit + if [ "$(/usr/pgsql-"${old_version}"/bin/pg_controldata /pgdata/pg"${old_version}" | grep -c 
"shut down in recovery")" -ne 1 ]; then echo -e "Directory in use, cannot remove..."; exit 1; fi + echo -e "Removing old pgdata directory...\n" + rm -rf /pgdata/pg"${old_version}" "$(realpath /pgdata/pg${old_version}/pg_wal)" + echo -e "Remove Data Job Complete!" + - remove + - "19" + image: img4 + name: database + resources: + requests: + cpu: 3140m + securityContext: + privileged: false + volumeMounts: + - mountPath: /mnt/some/such + name: vm1 + restartPolicy: Never + volumes: + - hostPath: + path: "" + name: vol2 +status: {} + `)) +} + +func TestPGUpgradeContainerImage(t *testing.T) { + upgrade := &v1beta1.PGUpgrade{} + + t.Setenv("RELATED_IMAGE_PGUPGRADE", "") + os.Unsetenv("RELATED_IMAGE_PGUPGRADE") + assert.Equal(t, pgUpgradeContainerImage(upgrade), "") + + t.Setenv("RELATED_IMAGE_PGUPGRADE", "") + assert.Equal(t, pgUpgradeContainerImage(upgrade), "") + + t.Setenv("RELATED_IMAGE_PGUPGRADE", "env-var-pgbackrest") + assert.Equal(t, pgUpgradeContainerImage(upgrade), "env-var-pgbackrest") + + assert.NilError(t, yaml.Unmarshal( + []byte(`{ image: spec-image }`), &upgrade.Spec)) + assert.Equal(t, pgUpgradeContainerImage(upgrade), "spec-image") +} + +func TestVerifyUpgradeImageValue(t *testing.T) { + upgrade := &v1beta1.PGUpgrade{} + + t.Run("crunchy-postgres", func(t *testing.T) { + t.Setenv("RELATED_IMAGE_PGUPGRADE", "") + os.Unsetenv("RELATED_IMAGE_PGUPGRADE") + err := verifyUpgradeImageValue(upgrade) + assert.ErrorContains(t, err, "crunchy-upgrade") + }) + +} diff --git a/internal/controller/pgupgrade/labels.go b/internal/controller/pgupgrade/labels.go new file mode 100644 index 0000000000..187fe6bf6f --- /dev/null +++ b/internal/controller/pgupgrade/labels.go @@ -0,0 +1,42 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgupgrade + +import ( + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +const ( + // ConditionPGUpgradeProgressing is the type used in a condition to indicate that + // an Postgres major upgrade is in progress. + ConditionPGUpgradeProgressing = "Progressing" + + // ConditionPGUpgradeSucceeded is the type used in a condition to indicate the + // status of a Postgres major upgrade. + ConditionPGUpgradeSucceeded = "Succeeded" + + labelPrefix = "postgres-operator.crunchydata.com/" + LabelPGUpgrade = labelPrefix + "pgupgrade" + LabelCluster = labelPrefix + "cluster" + LabelRole = labelPrefix + "role" + LabelVersion = labelPrefix + "version" + LabelPatroni = labelPrefix + "patroni" + LabelPGBackRestBackup = labelPrefix + "pgbackrest-backup" + LabelInstance = labelPrefix + "instance" + + ReplicaCreate = "replica-create" + ContainerDatabase = "database" + + pgUpgrade = "pgupgrade" + removeData = "removedata" +) + +func commonLabels(role string, upgrade *v1beta1.PGUpgrade) map[string]string { + return map[string]string{ + LabelPGUpgrade: upgrade.Name, + LabelCluster: upgrade.Spec.PostgresClusterName, + LabelRole: role, + } +} diff --git a/internal/controller/pgupgrade/pgupgrade_controller.go b/internal/controller/pgupgrade/pgupgrade_controller.go new file mode 100644 index 0000000000..d6d145b793 --- /dev/null +++ b/internal/controller/pgupgrade/pgupgrade_controller.go @@ -0,0 +1,513 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package pgupgrade + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + batchv1 "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/registration" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +const ( + AnnotationAllowUpgrade = "postgres-operator.crunchydata.com/allow-upgrade" +) + +// PGUpgradeReconciler reconciles a PGUpgrade object +type PGUpgradeReconciler struct { + Client client.Client + Owner client.FieldOwner + + Recorder record.EventRecorder + Registration registration.Registration +} + +//+kubebuilder:rbac:groups="batch",resources="jobs",verbs={list,watch} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={list,watch} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={list,watch} + +// SetupWithManager sets up the controller with the Manager. +func (r *PGUpgradeReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1beta1.PGUpgrade{}). + Owns(&batchv1.Job{}). + Watches( + v1beta1.NewPostgresCluster(), + r.watchPostgresClusters(), + ). + Complete(r) +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={list} + +// findUpgradesForPostgresCluster returns PGUpgrades that target cluster. +func (r *PGUpgradeReconciler) findUpgradesForPostgresCluster( + ctx context.Context, cluster client.ObjectKey, +) []*v1beta1.PGUpgrade { + var matching []*v1beta1.PGUpgrade + var upgrades v1beta1.PGUpgradeList + + // NOTE: If this becomes slow due to a large number of upgrades in a single + // namespace, we can configure the [ctrl.Manager] field indexer and pass a + // [fields.Selector] here. + // - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html + if r.Client.List(ctx, &upgrades, &client.ListOptions{ + Namespace: cluster.Namespace, + }) == nil { + for i := range upgrades.Items { + if upgrades.Items[i].Spec.PostgresClusterName == cluster.Name { + matching = append(matching, &upgrades.Items[i]) + } + } + } + return matching +} + +// watchPostgresClusters returns a [handler.EventHandler] for PostgresClusters. 
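+// Create, update, and delete events on a PostgresCluster enqueue a reconcile
+// request for every PGUpgrade in the same namespace that targets that cluster;
+// generic events are ignored.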
+func (r *PGUpgradeReconciler) watchPostgresClusters() handler.Funcs { + handle := func(ctx context.Context, cluster client.Object, q workqueue.RateLimitingInterface) { + key := client.ObjectKeyFromObject(cluster) + + for _, upgrade := range r.findUpgradesForPostgresCluster(ctx, key) { + q.Add(ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(upgrade), + }) + } + } + + return handler.Funcs{ + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.ObjectNew, q) + }, + DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + } +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={get} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades/status",verbs={patch} +//+kubebuilder:rbac:groups="batch",resources="jobs",verbs={delete} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters/status",verbs={patch} +//+kubebuilder:rbac:groups="batch",resources="jobs",verbs={create,patch} +//+kubebuilder:rbac:groups="batch",resources="jobs",verbs={list} +//+kubebuilder:rbac:groups="",resources="endpoints",verbs={get} +//+kubebuilder:rbac:groups="",resources="endpoints",verbs={delete} + +// Reconcile does the work to move the current state of the world toward the +// desired state described in a [v1beta1.PGUpgrade] identified by req. +func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, err error) { + log := ctrl.LoggerFrom(ctx) + + // Retrieve the upgrade from the client cache, if it exists. A deferred + // function below will send any changes to its Status field. + // + // NOTE: No DeepCopy is necessary here because controller-runtime makes a + // copy before returning from its cache. + // - https://github.com/kubernetes-sigs/controller-runtime/issues/1235 + upgrade := &v1beta1.PGUpgrade{} + err = r.Client.Get(ctx, req.NamespacedName, upgrade) + + if err == nil { + // Write any changes to the upgrade status on the way out. + before := upgrade.DeepCopy() + defer func() { + if !equality.Semantic.DeepEqual(before.Status, upgrade.Status) { + status := r.Client.Status().Patch(ctx, upgrade, client.MergeFrom(before), r.Owner) + + if err == nil && status != nil { + err = status + } else if status != nil { + log.Error(status, "Patching PGUpgrade status") + } + } + }() + } else { + // NotFound cannot be fixed by requeuing so ignore it. During background + // deletion, we receive delete events from upgrade's dependents after + // upgrade is deleted. + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Validate the remainder of the upgrade specification. These can likely + // move to CEL rules or a webhook when supported. + + // Exit if upgrade success condition has already been reached. + // If a cluster needs multiple upgrades, it is currently only possible to delete and + // create a new pgupgrade rather than edit an existing succeeded upgrade. + // This controller may be changed in the future to allow multiple uses of + // a single pgupgrade; if that is the case, it will probably need to reset + // the succeeded condition and remove upgrade and removedata jobs. 
+ succeeded := meta.FindStatusCondition(upgrade.Status.Conditions, + ConditionPGUpgradeSucceeded) + if succeeded != nil && succeeded.Reason == "PGUpgradeSucceeded" { + return + } + + if !r.UpgradeAuthorized(upgrade) { + return ctrl.Result{}, nil + } + + // Set progressing condition to true if it doesn't exist already + setStatusToProgressingIfReasonWas("", upgrade) + + // The "from" version must be smaller than the "to" version. + // An invalid PGUpgrade should not be requeued. + if upgrade.Spec.FromPostgresVersion >= upgrade.Spec.ToPostgresVersion { + + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.GetGeneration(), + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionFalse, + Reason: "PGUpgradeInvalid", + Message: fmt.Sprintf( + "Cannot upgrade from postgres version %d to %d", + upgrade.Spec.FromPostgresVersion, upgrade.Spec.ToPostgresVersion), + }) + + return ctrl.Result{}, nil + } + + if err = verifyUpgradeImageValue(upgrade); err != nil { + + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.GetGeneration(), + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionFalse, + Reason: "PGUpgradeInvalid", + Message: fmt.Sprintf("Error: %s", err), + }) + + return ctrl.Result{}, nil + } + + setStatusToProgressingIfReasonWas("PGUpgradeInvalid", upgrade) + + // Observations and cluster validation + // + // First, read everything we need from the API. Compare the state of the + // world to the upgrade specification, perform any remaining validation. + world, err := r.observeWorld(ctx, upgrade) + // If `observeWorld` returns an error, then exit early. + // If we do no exit here, err is assume nil + if err != nil { + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.Generation, + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionFalse, + Reason: "PGClusterErrorWhenObservingWorld", + Message: err.Error(), + }) + + return // FIXME + } + + setStatusToProgressingIfReasonWas("PGClusterErrorWhenObservingWorld", upgrade) + + // ClusterNotFound cannot be fixed by requeuing. We will reconcile again when + // a matching PostgresCluster is created. Set a condition about our + // inability to proceed. 
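+ // (Creation of that cluster triggers a new reconcile through the
+ // PostgresCluster watch registered in SetupWithManager.)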
+ if world.ClusterNotFound != nil { + + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.Generation, + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionFalse, + Reason: "PGClusterNotFound", + Message: world.ClusterNotFound.Error(), + }) + + return ctrl.Result{}, nil + } + + setStatusToProgressingIfReasonWas("PGClusterNotFound", upgrade) + + // Get the spec version to check if this cluster is at the requested version + version := int64(world.Cluster.Spec.PostgresVersion) + + // Get the status version and check the jobs to see if this upgrade has completed + statusVersion := int64(world.Cluster.Status.PostgresVersion) + upgradeJob := world.Jobs[pgUpgradeJob(upgrade).Name] + upgradeJobComplete := upgradeJob != nil && + jobCompleted(upgradeJob) + upgradeJobFailed := upgradeJob != nil && + jobFailed(upgradeJob) + + var removeDataJobsFailed bool + var removeDataJobsCompleted []*batchv1.Job + for _, job := range world.Jobs { + if job.GetLabels()[LabelRole] == removeData { + if jobCompleted(job) { + removeDataJobsCompleted = append(removeDataJobsCompleted, job) + } else if jobFailed(job) { + removeDataJobsFailed = true + break + } + } + } + removeDataJobsComplete := len(removeDataJobsCompleted) == world.ReplicasExpected + + // If the PostgresCluster is already set to the desired version, but the upgradejob has + // not completed successfully, the operator assumes that the cluster is already + // running the desired version. We consider this a no-op rather than a successful upgrade. + // Documentation should make it clear that the PostgresCluster postgresVersion + // should be updated _after_ the upgrade is considered successful. + if version == int64(upgrade.Spec.ToPostgresVersion) && !upgradeJobComplete { + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.Generation, + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionFalse, + Reason: "PGUpgradeResolved", + Message: fmt.Sprintf( + "PostgresCluster %s is already running version %d", + upgrade.Spec.PostgresClusterName, upgrade.Spec.ToPostgresVersion), + }) + + return ctrl.Result{}, nil + } + + // This condition is unlikely to ever need to be changed, but is added just in case. + setStatusToProgressingIfReasonWas("PGUpgradeResolved", upgrade) + + if statusVersion == int64(upgrade.Spec.ToPostgresVersion) { + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.Generation, + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionFalse, + Reason: "PGUpgradeCompleted", + Message: fmt.Sprintf( + "PostgresCluster %s is running version %d", + upgrade.Spec.PostgresClusterName, upgrade.Spec.ToPostgresVersion), + }) + + if upgradeJobComplete && removeDataJobsComplete { + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.Generation, + Type: ConditionPGUpgradeSucceeded, + Status: metav1.ConditionTrue, + Reason: "PGUpgradeSucceeded", + Message: fmt.Sprintf( + "PostgresCluster %s is ready to complete upgrade to version %d", + upgrade.Spec.PostgresClusterName, upgrade.Spec.ToPostgresVersion), + }) + } + + return ctrl.Result{}, nil + } + + // The upgrade needs to manipulate the data directory of the primary while + // Postgres is stopped. Wait until all instances are gone and the primary + // is identified. 
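+ //
+ // In practice the cluster is stopped with something like this illustrative
+ // manifest fragment:
+ //
+ //	spec:
+ //	  shutdown: true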
+ // + // Requiring the cluster be shutdown also provides some assurance that the + // user understands downtime requirement of upgrading + if !world.ClusterShutdown { + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.Generation, + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionFalse, + Reason: "PGClusterNotShutdown", + Message: "PostgresCluster instances still running", + }) + + return ctrl.Result{}, nil + } + + setStatusToProgressingIfReasonWas("PGClusterNotShutdown", upgrade) + + // A separate check for primary identification allows for cases where the + // PostgresCluster may not have been initialized properly. + if world.ClusterPrimary == nil { + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.Generation, + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionFalse, + Reason: "PGClusterPrimaryNotIdentified", + Message: "PostgresCluster primary instance not identified", + }) + + return ctrl.Result{}, nil + } + + setStatusToProgressingIfReasonWas("PGClusterPrimaryNotIdentified", upgrade) + + if version != int64(upgrade.Spec.FromPostgresVersion) && + statusVersion != int64(upgrade.Spec.ToPostgresVersion) { + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.Generation, + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionFalse, + Reason: "PGUpgradeInvalidForCluster", + Message: fmt.Sprintf( + "Current postgres version is %d, but upgrade expected %d", + version, upgrade.Spec.FromPostgresVersion), + }) + + return ctrl.Result{}, nil + } + + setStatusToProgressingIfReasonWas("PGUpgradeInvalidForCluster", upgrade) + + // Each upgrade can specify one cluster, but we also want to ensure that + // each cluster is managed by at most one upgrade. Check that the specified + // cluster is annotated with the name of *this* upgrade. + // + // Having an annotation on the cluster also provides some assurance that + // the user that created the upgrade also has authority to create or edit + // the cluster. + + if allowed := world.Cluster.GetAnnotations()[AnnotationAllowUpgrade] == upgrade.Name; !allowed { + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.Generation, + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionFalse, + Reason: "PGClusterMissingRequiredAnnotation", + Message: fmt.Sprintf( + "PostgresCluster %s lacks annotation for upgrade %s", + upgrade.Spec.PostgresClusterName, upgrade.GetName()), + }) + + return ctrl.Result{}, nil + } + + setStatusToProgressingIfReasonWas("PGClusterMissingRequiredAnnotation", upgrade) + + // Currently our jobs are set to only run once, so if any job has failed, the + // upgrade has failed. + if upgradeJobFailed || removeDataJobsFailed { + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.Generation, + Type: ConditionPGUpgradeSucceeded, + Status: metav1.ConditionFalse, + Reason: "PGUpgradeFailed", + Message: "Upgrade jobs failed, please check individual pod logs", + }) + + return ctrl.Result{}, nil + } + + // If we have reached this point, all preconditions for upgrade are satisfied. 
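+ // That is: the allow-upgrade annotation matches this PGUpgrade, the cluster
+ // is shutdown with a primary identified, the Postgres versions line up, and
+ // no upgrade or removedata job has failed.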
+ // If the jobs have already run to completion + // - delete the replica-create jobs to kick off a backup + // - delete the PostgresCluster.Status.Repos to kick off a reconcile + if upgradeJobComplete && removeDataJobsComplete && + statusVersion != int64(upgrade.Spec.ToPostgresVersion) { + + // Patroni will try to recreate replicas using pgBackRest. Convince PGO to + // take a recent backup by deleting its "replica-create" jobs. + for _, object := range world.Jobs { + if backup := object.Labels[LabelPGBackRestBackup]; err == nil && + backup == ReplicaCreate { + + uid := object.GetUID() + version := object.GetResourceVersion() + exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} + // Jobs default to an `orphanDependents` policy, orphaning pods after deletion. + // We don't want that, so we set the delete policy explicitly. + // - https://kubernetes.io/docs/concepts/workloads/controllers/job/ + // - https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/batch/job/strategy.go#L58 + propagate := client.PropagationPolicy(metav1.DeletePropagationBackground) + err = client.IgnoreNotFound(r.Client.Delete(ctx, object, exactly, propagate)) + } + } + + if err == nil { + patch := world.Cluster.DeepCopy() + + // Set the cluster status when we know the upgrade has completed successfully. + // This will serve to help the user see that the upgrade has completed if they + // are only watching the PostgresCluster + patch.Status.PostgresVersion = upgrade.Spec.ToPostgresVersion + + // Set the pgBackRest status for bootstrapping + patch.Status.PGBackRest.Repos = []v1beta1.RepoStatus{} + + err = r.Client.Status().Patch(ctx, patch, client.MergeFrom(world.Cluster), r.Owner) + } + + return ctrl.Result{}, err + } + + // TODO: error from apply could mean that the job exists with a different spec. + if err == nil && !upgradeJobComplete { + err = errors.WithStack(r.apply(ctx, + r.generateUpgradeJob(ctx, upgrade, world.ClusterPrimary, config.FetchKeyCommand(&world.Cluster.Spec)))) + } + + // Create the jobs to remove the data from the replicas, as long as + // the upgrade job has completed. + // (When the cluster is not shutdown, the `world.ClusterReplicas` will be [], + // so there should be no danger of accidentally targeting the primary.) + if err == nil && upgradeJobComplete && !removeDataJobsComplete { + for _, sts := range world.ClusterReplicas { + if err == nil { + err = r.apply(ctx, r.generateRemoveDataJob(ctx, upgrade, sts)) + } + } + } + + // The upgrade job generates a new system identifier for this cluster. + // Clear the old identifier from Patroni by deleting its DCS Endpoints. + // This is safe to do this when all Patroni processes are stopped + // (ClusterShutdown) and PGO has identified a leader to start first + // (ClusterPrimary). + // - https://github.com/zalando/patroni/blob/v2.1.2/docs/existing_data.rst + // + // TODO(cbandy): This works only when using Kubernetes Endpoints for DCS. + if len(world.PatroniEndpoints) > 0 { + for _, object := range world.PatroniEndpoints { + uid := object.GetUID() + version := object.GetResourceVersion() + exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} + err = client.IgnoreNotFound(r.Client.Delete(ctx, object, exactly)) + } + + // Requeue to verify that Patroni endpoints are deleted + return runtime.RequeueWithBackoff(), err // FIXME + } + + // TODO: write upgradeJob back to world? No, we will wake and see it when it + // has some progress. OTOH, whatever we just wrote has the latest metadata.generation. 
+ // TODO: consider what it means to "re-use" the same PGUpgrade for more than + // one postgres version. Should the job name include the version number? + + log.Info("Reconciled", "requeue", !result.IsZero() || err != nil) + return +} + +func setStatusToProgressingIfReasonWas(reason string, upgrade *v1beta1.PGUpgrade) { + progressing := meta.FindStatusCondition(upgrade.Status.Conditions, + ConditionPGUpgradeProgressing) + if progressing == nil || (progressing != nil && progressing.Reason == reason) { + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + ObservedGeneration: upgrade.GetGeneration(), + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionTrue, + Reason: "PGUpgradeProgressing", + Message: fmt.Sprintf( + "Upgrade progressing for cluster %s", + upgrade.Spec.PostgresClusterName), + }) + } +} diff --git a/internal/controller/pgupgrade/registration.go b/internal/controller/pgupgrade/registration.go new file mode 100644 index 0000000000..05d0d80cbd --- /dev/null +++ b/internal/controller/pgupgrade/registration.go @@ -0,0 +1,27 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgupgrade + +import ( + "k8s.io/apimachinery/pkg/api/meta" + + "github.com/crunchydata/postgres-operator/internal/registration" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func (r *PGUpgradeReconciler) UpgradeAuthorized(upgrade *v1beta1.PGUpgrade) bool { + // Allow an upgrade in progress to complete, when the registration requirement is introduced. + // But don't allow new upgrades to be started until a valid token is applied. + progressing := meta.FindStatusCondition(upgrade.Status.Conditions, ConditionPGUpgradeProgressing) != nil + required := r.Registration.Required(r.Recorder, upgrade, &upgrade.Status.Conditions) + + // If a valid token has not been applied, warn the user. + if required && !progressing { + registration.SetRequiredWarning(r.Recorder, upgrade, &upgrade.Status.Conditions) + return false + } + + return true +} diff --git a/internal/controller/pgupgrade/registration_test.go b/internal/controller/pgupgrade/registration_test.go new file mode 100644 index 0000000000..dc3a4144bc --- /dev/null +++ b/internal/controller/pgupgrade/registration_test.go @@ -0,0 +1,95 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package pgupgrade + +import ( + "testing" + + "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/registration" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/events" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestUpgradeAuthorized(t *testing.T) { + t.Run("UpgradeAlreadyInProgress", func(t *testing.T) { + reconciler := new(PGUpgradeReconciler) + upgrade := new(v1beta1.PGUpgrade) + + for _, required := range []bool{false, true} { + reconciler.Registration = registration.RegistrationFunc( + func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { + return required + }) + + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionTrue, + }) + + result := reconciler.UpgradeAuthorized(upgrade) + assert.Assert(t, result, "expected signal to proceed") + + progressing := meta.FindStatusCondition(upgrade.Status.Conditions, ConditionPGUpgradeProgressing) + assert.Equal(t, progressing.Status, metav1.ConditionTrue) + } + }) + + t.Run("RegistrationRequired", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + upgrade := new(v1beta1.PGUpgrade) + upgrade.Name = "some-upgrade" + + reconciler := PGUpgradeReconciler{ + Recorder: recorder, + Registration: registration.RegistrationFunc( + func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { + return true + }), + } + + meta.RemoveStatusCondition(&upgrade.Status.Conditions, ConditionPGUpgradeProgressing) + + result := reconciler.UpgradeAuthorized(upgrade) + assert.Assert(t, !result, "expected signal to not proceed") + + condition := meta.FindStatusCondition(upgrade.Status.Conditions, v1beta1.Registered) + if assert.Check(t, condition != nil) { + assert.Equal(t, condition.Status, metav1.ConditionFalse) + } + + if assert.Check(t, len(recorder.Events) > 0) { + assert.Equal(t, recorder.Events[0].Type, "Warning") + assert.Equal(t, recorder.Events[0].Regarding.Kind, "PGUpgrade") + assert.Equal(t, recorder.Events[0].Regarding.Name, "some-upgrade") + assert.Assert(t, cmp.Contains(recorder.Events[0].Note, "requires")) + } + }) + + t.Run("RegistrationCompleted", func(t *testing.T) { + reconciler := new(PGUpgradeReconciler) + upgrade := new(v1beta1.PGUpgrade) + + called := false + reconciler.Registration = registration.RegistrationFunc( + func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { + called = true + return false + }) + + meta.RemoveStatusCondition(&upgrade.Status.Conditions, ConditionPGUpgradeProgressing) + + result := reconciler.UpgradeAuthorized(upgrade) + assert.Assert(t, result, "expected signal to proceed") + assert.Assert(t, called, "expected registration package to clear conditions") + }) +} diff --git a/internal/controller/pgupgrade/utils.go b/internal/controller/pgupgrade/utils.go new file mode 100644 index 0000000000..292107e440 --- /dev/null +++ b/internal/controller/pgupgrade/utils.go @@ -0,0 +1,64 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package pgupgrade + +import ( + "os" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// The owner reference created by controllerutil.SetControllerReference blocks +// deletion. The OwnerReferencesPermissionEnforcement plugin requires that the +// creator of such a reference have either "delete" permission on the owner or +// "update" permission on the owner's "finalizers" subresource. +// - https://docs.k8s.io/reference/access-authn-authz/admission-controllers/ +// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades/finalizers",verbs={update} + +// setControllerReference sets owner as a Controller OwnerReference on controlled. +// It panics if another controller is already set. +func (r *PGUpgradeReconciler) setControllerReference( + owner *v1beta1.PGUpgrade, controlled client.Object, +) { + if metav1.GetControllerOf(controlled) != nil { + panic(controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme())) + } + + controlled.SetOwnerReferences(append( + controlled.GetOwnerReferences(), + metav1.OwnerReference{ + APIVersion: v1beta1.GroupVersion.String(), + Kind: "PGUpgrade", + Name: owner.GetName(), + UID: owner.GetUID(), + BlockOwnerDeletion: initialize.Pointer(true), + Controller: initialize.Pointer(true), + }, + )) +} + +// Merge takes sets of labels and merges them. The last set +// provided will win in case of conflicts. +func Merge(sets ...map[string]string) labels.Set { + merged := labels.Set{} + for _, set := range sets { + merged = labels.Merge(merged, set) + } + return merged +} + +// defaultFromEnv reads the environment variable key when value is empty. +func defaultFromEnv(value, key string) string { + if value == "" { + return os.Getenv(key) + } + return value +} diff --git a/internal/controller/pgupgrade/world.go b/internal/controller/pgupgrade/world.go new file mode 100644 index 0000000000..18d056fe25 --- /dev/null +++ b/internal/controller/pgupgrade/world.go @@ -0,0 +1,175 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgupgrade + +import ( + "context" + + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// The client used by the controller sets up a cache and an informer for any GVK +// that it GETs. That informer needs the "watch" permission. 
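+// That is why the RBAC markers below request "watch" even for resources that
+// observeWorld only reads.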
+// - https://github.com/kubernetes-sigs/controller-runtime/issues/1249 +// - https://github.com/kubernetes-sigs/controller-runtime/issues/1454 +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,watch} +//+kubebuilder:rbac:groups="",resources="endpoints",verbs={list,watch} +//+kubebuilder:rbac:groups="batch",resources="jobs",verbs={list,watch} +//+kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={list,watch} + +func (r *PGUpgradeReconciler) observeWorld( + ctx context.Context, upgrade *v1beta1.PGUpgrade, +) (*World, error) { + selectCluster := labels.SelectorFromSet(labels.Set{ + LabelCluster: upgrade.Spec.PostgresClusterName, + }) + + world := NewWorld() + world.Upgrade = upgrade + + cluster := v1beta1.NewPostgresCluster() + err := errors.WithStack( + r.Client.Get(ctx, client.ObjectKey{ + Namespace: upgrade.Namespace, + Name: upgrade.Spec.PostgresClusterName, + }, cluster)) + err = world.populateCluster(cluster, err) + + if err == nil { + var endpoints corev1.EndpointsList + err = errors.WithStack( + r.Client.List(ctx, &endpoints, + client.InNamespace(upgrade.Namespace), + client.MatchingLabelsSelector{Selector: selectCluster}, + )) + world.populatePatroniEndpoints(endpoints.Items) + } + + if err == nil { + var jobs batchv1.JobList + err = errors.WithStack( + r.Client.List(ctx, &jobs, + client.InNamespace(upgrade.Namespace), + client.MatchingLabelsSelector{Selector: selectCluster}, + )) + for i := range jobs.Items { + world.Jobs[jobs.Items[i].Name] = &jobs.Items[i] + } + } + + if err == nil { + var statefulsets appsv1.StatefulSetList + err = errors.WithStack( + r.Client.List(ctx, &statefulsets, + client.InNamespace(upgrade.Namespace), + client.MatchingLabelsSelector{Selector: selectCluster}, + )) + world.populateStatefulSets(statefulsets.Items) + } + + if err == nil { + world.populateShutdown() + } + + return world, err +} + +func (w *World) populateCluster(cluster *v1beta1.PostgresCluster, err error) error { + if err == nil { + w.Cluster = cluster + w.ClusterNotFound = nil + + } else if apierrors.IsNotFound(err) { + w.Cluster = nil + w.ClusterNotFound = err + err = nil + } + return err +} + +func (w *World) populatePatroniEndpoints(endpoints []corev1.Endpoints) { + for index, endpoint := range endpoints { + if endpoint.Labels[LabelPatroni] != "" { + w.PatroniEndpoints = append(w.PatroniEndpoints, &endpoints[index]) + } + } +} + +// populateStatefulSets assigns +// a) the expected number of replicas -- the number of StatefulSets that have the expected +// LabelInstance label, minus 1 (for the primary) +// b) the primary StatefulSet and replica StatefulSets if the cluster is shutdown. +// When the cluster is not shutdown, we cannot verify which StatefulSet is the primary. +func (w *World) populateStatefulSets(statefulSets []appsv1.StatefulSet) { + w.ReplicasExpected = -1 + if w.Cluster != nil { + startup := w.Cluster.Status.StartupInstance + for index, sts := range statefulSets { + if sts.Labels[LabelInstance] != "" { + w.ReplicasExpected++ + if startup != "" { + switch sts.Name { + case startup: + w.ClusterPrimary = &statefulSets[index] + default: + w.ClusterReplicas = append(w.ClusterReplicas, &statefulSets[index]) + } + } + } + } + } +} + +func (w *World) populateShutdown() { + if w.Cluster != nil { + status := w.Cluster.Status + generation := status.ObservedGeneration + + // The cluster is "shutdown" only when it is specified *and* the status + // indicates all instances are stopped. 
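+ // Concretely: spec.shutdown is true, the observed generation matches the
+ // current generation, and every instance set reports zero replicas.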
+ shutdownValue := w.Cluster.Spec.Shutdown + if shutdownValue != nil { + w.ClusterShutdown = *shutdownValue + } else { + w.ClusterShutdown = false + } + w.ClusterShutdown = w.ClusterShutdown && generation == w.Cluster.GetGeneration() + + sets := status.InstanceSets + for _, set := range sets { + if n := set.Replicas; n != 0 { + w.ClusterShutdown = false + } + } + } +} + +type World struct { + Cluster *v1beta1.PostgresCluster + Upgrade *v1beta1.PGUpgrade + + ClusterNotFound error + ClusterPrimary *appsv1.StatefulSet + ClusterReplicas []*appsv1.StatefulSet + ClusterShutdown bool + ReplicasExpected int + + PatroniEndpoints []*corev1.Endpoints + Jobs map[string]*batchv1.Job +} + +func NewWorld() *World { + return &World{ + Jobs: make(map[string]*batchv1.Job), + } +} diff --git a/internal/controller/pgupgrade/world_test.go b/internal/controller/pgupgrade/world_test.go new file mode 100644 index 0000000000..4aa24f714d --- /dev/null +++ b/internal/controller/pgupgrade/world_test.go @@ -0,0 +1,230 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgupgrade + +import ( + "fmt" + "testing" + + "gotest.tools/v3/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestPopulateCluster(t *testing.T) { + t.Run("Found", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + cluster.SetName("cluster") + + world := NewWorld() + err := world.populateCluster(cluster, nil) + + assert.NilError(t, err) + assert.Equal(t, world.Cluster, cluster) + assert.Assert(t, world.ClusterNotFound == nil) + }) + + t.Run("NotFound", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + expected := apierrors.NewNotFound(schema.GroupResource{}, "name") + + world := NewWorld() + err := world.populateCluster(cluster, expected) + + assert.NilError(t, err, "NotFound is handled") + assert.Assert(t, world.Cluster == nil) + assert.Equal(t, world.ClusterNotFound, expected) + }) + + t.Run("Error", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + expected := fmt.Errorf("danger") + + world := NewWorld() + err := world.populateCluster(cluster, expected) + + assert.Equal(t, err, expected) + assert.Assert(t, world.Cluster == nil) + assert.Assert(t, world.ClusterNotFound == nil) + }) +} + +func TestPopulatePatroniEndpoint(t *testing.T) { + endpoints := []corev1.Endpoints{ + { + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + LabelPatroni: "west", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + LabelPatroni: "east", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "different-label": "north", + }, + }, + }, + } + + world := NewWorld() + world.populatePatroniEndpoints(endpoints) + + // The first two have the correct labels. 
+ assert.DeepEqual(t, world.PatroniEndpoints, []*corev1.Endpoints{ + &endpoints[0], + &endpoints[1], + }) +} + +func TestPopulateShutdown(t *testing.T) { + t.Run("NoCluster", func(t *testing.T) { + world := NewWorld() + + world.populateShutdown() + assert.Assert(t, !world.ClusterShutdown) + }) + + t.Run("NotShutdown", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + cluster.Spec.Shutdown = initialize.Bool(false) + + world := NewWorld() + world.Cluster = cluster + + world.populateShutdown() + assert.Assert(t, !world.ClusterShutdown) + }) + + t.Run("OldStatus", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + cluster.SetGeneration(99) + cluster.Spec.Shutdown = initialize.Bool(true) + cluster.Status.ObservedGeneration = 21 + + world := NewWorld() + world.Cluster = cluster + + world.populateShutdown() + assert.Assert(t, !world.ClusterShutdown) + }) + + t.Run("InstancesRunning", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + cluster.SetGeneration(99) + cluster.Spec.Shutdown = initialize.Bool(true) + cluster.Status.ObservedGeneration = 99 + cluster.Status.InstanceSets = []v1beta1.PostgresInstanceSetStatus{{Replicas: 2}} + + world := NewWorld() + world.Cluster = cluster + + world.populateShutdown() + assert.Assert(t, !world.ClusterShutdown) + }) + + t.Run("InstancesStopped", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + cluster.SetGeneration(99) + cluster.Spec.Shutdown = initialize.Bool(true) + cluster.Status.ObservedGeneration = 99 + cluster.Status.InstanceSets = []v1beta1.PostgresInstanceSetStatus{{Replicas: 0}} + + world := NewWorld() + world.Cluster = cluster + + world.populateShutdown() + assert.Assert(t, world.ClusterShutdown) + }) +} + +func TestPopulateStatefulSets(t *testing.T) { + t.Run("NoPopulatesWithoutStartupGiven", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + world := NewWorld() + world.Cluster = cluster + + primary := appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "the-one", + Labels: map[string]string{ + LabelInstance: "whatever", + }, + }, + } + replica := appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "something-else", + Labels: map[string]string{ + LabelInstance: "whatever", + }, + }, + } + other := appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "repo-host", + Labels: map[string]string{ + "other-label": "other", + }, + }, + } + world.populateStatefulSets([]appsv1.StatefulSet{primary, replica, other}) + + assert.Assert(t, world.ClusterPrimary == nil) + assert.Assert(t, world.ClusterReplicas == nil) + assert.Assert(t, world.ReplicasExpected == 1) + }) + + t.Run("PopulatesWithStartupGiven", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + cluster.Status.StartupInstance = "the-one" + + world := NewWorld() + world.Cluster = cluster + + primary := appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "the-one", + Labels: map[string]string{ + LabelInstance: "whatever", + }, + }, + } + replica := appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "something-else", + Labels: map[string]string{ + LabelInstance: "whatever", + }, + }, + } + other := appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "repo-host", + Labels: map[string]string{ + "other-label": "other", + }, + }, + } + world.populateStatefulSets([]appsv1.StatefulSet{primary, replica, other}) + + assert.DeepEqual(t, world.ClusterPrimary, &primary) + assert.DeepEqual(t, world.ClusterReplicas, []*appsv1.StatefulSet{&replica}) + assert.Assert(t, 
world.ReplicasExpected == 1) + }) +} diff --git a/internal/controller/postgrescluster/apply.go b/internal/controller/postgrescluster/apply.go index 2b65c1c965..2dae1f7d80 100644 --- a/internal/controller/postgrescluster/apply.go +++ b/internal/controller/postgrescluster/apply.go @@ -1,35 +1,15 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" - "encoding/json" - "fmt" "reflect" - jsonpatch "github.com/evanphx/json-patch" - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/kubeapi" @@ -58,22 +38,7 @@ func (r *Reconciler) apply(ctx context.Context, object client.Object) error { // Some fields cannot be server-side applied correctly. When their outcome // does not match the intent, send a json-patch to get really specific. switch actual := object.(type) { - case *appsv1.StatefulSet: - applyPodTemplateSpec(patch, - actual.Spec.Template, intent.(*appsv1.StatefulSet).Spec.Template, - "spec", "template") - - case *batchv1.Job: - applyPodTemplateSpec(patch, - actual.Spec.Template, intent.(*batchv1.Job).Spec.Template, - "spec", "template") - case *corev1.Service: - // Changing Service.Spec.Type requires a special apply-patch sometimes. - if err != nil { - err = r.handleServiceError(ctx, object.(*corev1.Service), data, err) - } - applyServiceSpec(patch, actual.Spec, intent.(*corev1.Service).Spec, "spec") } @@ -84,95 +49,11 @@ func (r *Reconciler) apply(ctx context.Context, object client.Object) error { return err } -// handleServiceError inspects err for expected Kubernetes API responses to -// writing a Service. It returns err when it cannot resolve the issue, otherwise -// it returns nil. -func (r *Reconciler) handleServiceError( - ctx context.Context, service *corev1.Service, apply []byte, err error, -) error { - var status metav1.Status - if api := apierrors.APIStatus(nil); errors.As(err, &api) { - status = api.Status() - } - - // Service.Spec.Ports.NodePort must be cleared for ClusterIP prior to - // Kubernetes 1.20. When all the errors are about disallowed "nodePort", - // run a json-patch on the apply-patch to set them all to null. 
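The removed handleServiceError cleared those ports with an RFC 6902 patch. For reference, a minimal standalone example of that technique with github.com/evanphx/json-patch, using a made-up Service document:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// A trimmed-down Service document with a nodePort the API server rejected.
	doc := []byte(`{"spec":{"type":"ClusterIP","ports":[{"port":9999,"nodePort":31234}]}}`)

	// An RFC 6902 "add" with a null value overwrites the existing member,
	// which is how the removed code blanked each disallowed nodePort before
	// re-sending the apply-patch.
	patch, err := jsonpatch.DecodePatch([]byte(
		`[{"op":"add","path":"/spec/ports/0/nodePort","value":null}]`))
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```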
- // - https://issue.k8s.io/33766 - if service.Spec.Type == corev1.ServiceTypeClusterIP { - add := json.RawMessage(`"add"`) - null := json.RawMessage(`null`) - patch := make(jsonpatch.Patch, 0, len(service.Spec.Ports)) - - if apierrors.IsInvalid(err) && status.Details != nil { - for i, cause := range status.Details.Causes { - path := json.RawMessage(fmt.Sprintf(`"/spec/ports/%d/nodePort"`, i)) - - if cause.Type == metav1.CauseType(field.ErrorTypeForbidden) && - cause.Field == fmt.Sprintf("spec.ports[%d].nodePort", i) { - patch = append(patch, - jsonpatch.Operation{"op": &add, "value": &null, "path": &path}) - } - } - } - - // Amend the apply-patch when all the errors can be fixed. - if len(patch) == len(service.Spec.Ports) { - apply, err = patch.Apply(apply) - } - - // Send the apply-patch with force=true. - if err == nil { - patch := client.RawPatch(client.Apply.Type(), apply) - err = r.patch(ctx, service, patch, client.ForceOwnership) - } - } - - return err -} - -// applyPodSecurityContext is called by Reconciler.apply to work around issues -// with server-side apply. -func applyPodSecurityContext( - patch *kubeapi.JSON6902, actual, intent *corev1.PodSecurityContext, path ...string, -) { - if intent == nil { - // This won't happen because we populate all PodSecurityContext. - return - } - if actual == nil { - patch.Replace(path...)(intent) - return - } - // Empty "omitempty" slices are ignored until Kubernetes 1.19. - // - https://issue.k8s.io/89273 - if !equality.Semantic.DeepEqual(actual.SupplementalGroups, intent.SupplementalGroups) { - patch.Replace(append(path, "supplementalGroups")...)(intent.SupplementalGroups) - } -} - -// applyPodTemplateSpec is called by Reconciler.apply to work around issues -// with server-side apply. -func applyPodTemplateSpec( - patch *kubeapi.JSON6902, actual, intent corev1.PodTemplateSpec, path ...string, -) { - applyPodSecurityContext(patch, - actual.Spec.SecurityContext, - intent.Spec.SecurityContext, - append(path, "spec", "securityContext")...) -} - // applyServiceSpec is called by Reconciler.apply to work around issues // with server-side apply. func applyServiceSpec( patch *kubeapi.JSON6902, actual, intent corev1.ServiceSpec, path ...string, ) { - // Empty "omitempty" slices are ignored until Kubernetes 1.19. - // - https://issue.k8s.io/89273 - if !equality.Semantic.DeepEqual(actual.ExternalIPs, intent.ExternalIPs) { - patch.Replace(append(path, "externalIPs")...)(intent.ExternalIPs) - } - // Service.Spec.Selector is not +mapType=atomic until Kubernetes 1.22. // - https://issue.k8s.io/97970 if !equality.Semantic.DeepEqual(actual.Selector, intent.Selector) { diff --git a/internal/controller/postgrescluster/apply_test.go b/internal/controller/postgrescluster/apply_test.go index 738b9eaedf..c163e8a5ab 100644 --- a/internal/controller/postgrescluster/apply_test.go +++ b/internal/controller/postgrescluster/apply_test.go @@ -1,19 +1,6 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -29,7 +16,6 @@ import ( "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,29 +23,18 @@ import ( "k8s.io/client-go/discovery" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/envtest" + + "github.com/crunchydata/postgres-operator/internal/testing/require" ) func TestServerSideApply(t *testing.T) { - // TODO: Update tests that include envtest package to better handle - // running in parallel - // t.Parallel() - ctx := context.Background() - env := &envtest.Environment{} - config, err := env.Start() - assert.NilError(t, err) - t.Cleanup(func() { assert.Check(t, env.Stop()) }) - - cc, err := client.New(config, client.Options{}) - assert.NilError(t, err) + cfg, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, ns)) }) + ns := setupNamespace(t, cc) - dc, err := discovery.NewDiscoveryClientForConfig(config) + dc, err := discovery.NewDiscoveryClientForConfig(cfg) assert.NilError(t, err) server, err := dc.ServerVersion() @@ -70,9 +45,9 @@ func TestServerSideApply(t *testing.T) { t.Run("ObjectMeta", func(t *testing.T) { reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} - constructor := func() *v1.ConfigMap { - var cm v1.ConfigMap - cm.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("ConfigMap")) + constructor := func() *corev1.ConfigMap { + var cm corev1.ConfigMap + cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) cm.Namespace, cm.Name = ns.Name, "object-meta" cm.Data = map[string]string{"key": "value"} return &cm @@ -90,8 +65,18 @@ func TestServerSideApply(t *testing.T) { after := constructor() assert.NilError(t, cc.Patch(ctx, after, client.Apply, reconciler.Owner)) assert.Assert(t, after.GetResourceVersion() != "") - assert.Assert(t, after.GetResourceVersion() != before.GetResourceVersion(), - "expected https://github.com/kubernetes-sigs/controller-runtime/issues/1356") + + switch { + case serverVersion.LessThan(version.MustParseGeneric("1.25.15")): + case serverVersion.AtLeast(version.MustParseGeneric("1.26")) && serverVersion.LessThan(version.MustParseGeneric("1.26.10")): + case serverVersion.AtLeast(version.MustParseGeneric("1.27")) && serverVersion.LessThan(version.MustParseGeneric("1.27.7")): + + assert.Assert(t, after.GetResourceVersion() != before.GetResourceVersion(), + "expected https://issue.k8s.io/116861") + + default: + assert.Assert(t, after.GetResourceVersion() == before.GetResourceVersion()) + } // Our apply method generates the correct apply-patch. again := constructor() @@ -105,16 +90,16 @@ func TestServerSideApply(t *testing.T) { reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} // Setup two possible controllers. 
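The version gate in the ObjectMeta test above uses k8s.io/apimachinery's version package. A compact sketch of the comparison pattern, with a hard-coded server version rather than the test's discovery lookup, and not a restatement of the test's exact assertions:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	// In the test, this comes from the discovery client's ServerVersion.
	server := version.MustParseGeneric("1.26.9")

	// The ranges gated in the switch above correspond to servers that
	// predate the backported fix for https://issue.k8s.io/116861.
	preFix := server.LessThan(version.MustParseGeneric("1.25.15")) ||
		(server.AtLeast(version.MustParseGeneric("1.26")) &&
			server.LessThan(version.MustParseGeneric("1.26.10"))) ||
		(server.AtLeast(version.MustParseGeneric("1.27")) &&
			server.LessThan(version.MustParseGeneric("1.27.7")))

	fmt.Println("server predates the fix:", preFix)
}
```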
- controller1 := new(v1.ConfigMap) + controller1 := new(corev1.ConfigMap) controller1.Namespace, controller1.Name = ns.Name, "controller1" assert.NilError(t, cc.Create(ctx, controller1)) - controller2 := new(v1.ConfigMap) + controller2 := new(corev1.ConfigMap) controller2.Namespace, controller2.Name = ns.Name, "controller2" assert.NilError(t, cc.Create(ctx, controller2)) // Create an object that is controlled. - controlled := new(v1.ConfigMap) + controlled := new(corev1.ConfigMap) controlled.Namespace, controlled.Name = ns.Name, "controlled" assert.NilError(t, controllerutil.SetControllerReference(controller1, controlled, cc.Scheme())) @@ -124,8 +109,8 @@ func TestServerSideApply(t *testing.T) { assert.Assert(t, original != nil) // Try to change the controller using client.Apply. - applied := new(v1.ConfigMap) - applied.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("ConfigMap")) + applied := new(corev1.ConfigMap) + applied.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) applied.Namespace, applied.Name = controlled.Namespace, controlled.Name assert.NilError(t, controllerutil.SetControllerReference(controller2, applied, cc.Scheme())) @@ -157,7 +142,7 @@ func TestServerSideApply(t *testing.T) { ) }) - t.Run("StatefulSetPodTemplate", func(t *testing.T) { + t.Run("StatefulSetStatus", func(t *testing.T) { constructor := func(name string) *appsv1.StatefulSet { var sts appsv1.StatefulSet sts.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("StatefulSet")) @@ -170,57 +155,33 @@ func TestServerSideApply(t *testing.T) { } reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + upstream := constructor("status-upstream") - // Start with fields filled out. - intent := constructor("change-to-zero") - intent.Spec.Template.Spec.SecurityContext = &corev1.PodSecurityContext{ - SupplementalGroups: []int64{1, 2, 3}, - } - - // Create the StatefulSet. - before := intent.DeepCopy() - assert.NilError(t, - cc.Patch(ctx, before, client.Apply, client.ForceOwnership, reconciler.Owner)) - - // Change fields to zero. - intent.Spec.Template.Spec.SecurityContext.SupplementalGroups = nil - - // client.Apply cannot correct it in old versions of Kubernetes. - after := intent.DeepCopy() - assert.NilError(t, - cc.Patch(ctx, after, client.Apply, client.ForceOwnership, reconciler.Owner)) - + // The structs defined in "k8s.io/api/apps/v1" marshal empty status fields. switch { - case serverVersion.LessThan(version.MustParseGeneric("1.18.19")): - - // - https://pr.k8s.io/101179 - assert.Assert(t, !equality.Semantic.DeepEqual( - after.Spec.Template.Spec.SecurityContext, - intent.Spec.Template.Spec.SecurityContext), - "expected https://issue.k8s.io/89273, got %v", - after.Spec.Template.Spec.SecurityContext) + case serverVersion.LessThan(version.MustParseGeneric("1.22")): + assert.ErrorContains(t, + cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership, reconciler.Owner), + "field not declared in schema", + "expected https://issue.k8s.io/109210") default: - assert.DeepEqual(t, - after.Spec.Template.Spec.SecurityContext, - intent.Spec.Template.Spec.SecurityContext) + assert.NilError(t, + cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership, reconciler.Owner)) } - // Our apply method corrects it. - again := intent.DeepCopy() + // Our apply method generates the correct apply-patch. 
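These tests repeatedly exercise controller-runtime's server-side apply idiom: an apply-type PATCH with a field owner, optionally forcing ownership. A hedged sketch of that idiom, assuming a working *rest.Config and a hypothetical object name:

```go
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// serverSideApply creates or updates a ConfigMap with an apply-type patch.
// The GroupVersionKind is set explicitly because the apply patch is
// serialized from the object itself.
func serverSideApply(ctx context.Context, cfg *rest.Config, namespace string) error {
	c, err := client.New(cfg, client.Options{})
	if err != nil {
		return err
	}

	cm := &corev1.ConfigMap{}
	cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
	cm.Namespace, cm.Name = namespace, "example" // hypothetical name
	cm.Data = map[string]string{"key": "value"}

	return c.Patch(ctx, cm, client.Apply, client.ForceOwnership, client.FieldOwner("example-owner"))
}
```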
+ again := constructor("status-local") assert.NilError(t, reconciler.apply(ctx, again)) - assert.DeepEqual(t, - again.Spec.Template.Spec.SecurityContext, - intent.Spec.Template.Spec.SecurityContext) }) t.Run("ServiceSelector", func(t *testing.T) { - constructor := func(name string) *v1.Service { - var service v1.Service - service.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Service")) + constructor := func(name string) *corev1.Service { + var service corev1.Service + service.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) service.Namespace, service.Name = ns.Name, name - service.Spec.Ports = []v1.ServicePort{{ - Port: 9999, Protocol: v1.ProtocolTCP, + service.Spec.Ports = []corev1.ServicePort{{ + Port: 9999, Protocol: corev1.ProtocolTCP, }} return &service } @@ -338,105 +299,4 @@ func TestServerSideApply(t *testing.T) { }) } }) - - t.Run("ServiceSpec", func(t *testing.T) { - constructor := func(name string) *corev1.Service { - var service corev1.Service - service.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) - service.Namespace, service.Name = ns.Name, name - service.Spec.Ports = []corev1.ServicePort{{ - Port: 9999, Protocol: corev1.ProtocolTCP, - }} - return &service - } - - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} - - // Start with fields filled out. - intent := constructor("change-to-zero") - intent.Spec.ExternalIPs = []string{"10.9.8.7", "192.0.2.10"} - - // Create the StatefulSet. - before := intent.DeepCopy() - assert.NilError(t, - cc.Patch(ctx, before, client.Apply, client.ForceOwnership, reconciler.Owner)) - - // Change fields to zero. - intent.Spec.ExternalIPs = nil - - // client.Apply cannot correct it in old versions of Kubernetes. - after := intent.DeepCopy() - assert.NilError(t, - cc.Patch(ctx, after, client.Apply, client.ForceOwnership, reconciler.Owner)) - - switch { - case serverVersion.LessThan(version.MustParseGeneric("1.18.19")): - - // - https://pr.k8s.io/101179 - assert.Assert(t, !equality.Semantic.DeepEqual( - after.Spec.ExternalIPs, - intent.Spec.ExternalIPs), - "expected https://issue.k8s.io/89273, got %v", - after.Spec.ExternalIPs) - - default: - assert.DeepEqual(t, after.Spec.ExternalIPs, intent.Spec.ExternalIPs) - } - - // Our apply method corrects it. - again := intent.DeepCopy() - assert.NilError(t, reconciler.apply(ctx, again)) - assert.DeepEqual(t, again.Spec.ExternalIPs, intent.Spec.ExternalIPs) - }) - - t.Run("ServiceType", func(t *testing.T) { - constructor := func(name string) *corev1.Service { - var service corev1.Service - service.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) - service.Namespace, service.Name = ns.Name, name - service.Spec.Ports = []corev1.ServicePort{ - {Name: "one", Port: 9999, Protocol: corev1.ProtocolTCP}, - {Name: "two", Port: 1234, Protocol: corev1.ProtocolTCP}, - } - return &service - } - - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} - - // Start as NodePort. - intent := constructor("node-port") - intent.Spec.Type = corev1.ServiceTypeNodePort - - // Create the Service. - before := intent.DeepCopy() - assert.NilError(t, - cc.Patch(ctx, before, client.Apply, client.ForceOwnership, reconciler.Owner)) - - // Change to ClusterIP. - intent.Spec.Type = corev1.ServiceTypeClusterIP - - // client.Apply cannot change it in old versions of Kubernetes. 
- after := intent.DeepCopy() - err := cc.Patch(ctx, after, client.Apply, client.ForceOwnership, reconciler.Owner) - - switch { - case serverVersion.LessThan(version.MustParseGeneric("1.20")): - - assert.ErrorContains(t, err, "nodePort: Forbidden", - "expected https://issue.k8s.io/33766") - - default: - assert.NilError(t, err) - assert.Equal(t, after.Spec.Type, intent.Spec.Type) - assert.Equal(t, after.Spec.ClusterIP, before.Spec.ClusterIP, - "expected to keep the same ClusterIP") - } - - // Our apply method changes it. - again := intent.DeepCopy() - assert.NilError(t, reconciler.apply(ctx, again)) - assert.Equal(t, again.Spec.Type, intent.Spec.Type) - assert.Equal(t, again.Spec.ClusterIP, before.Spec.ClusterIP, - "expected to keep the same ClusterIP") - }) } diff --git a/internal/controller/postgrescluster/cluster.go b/internal/controller/postgrescluster/cluster.go index eb451e92b9..3ba6eab0e8 100644 --- a/internal/controller/postgrescluster/cluster.go +++ b/internal/controller/postgrescluster/cluster.go @@ -1,47 +1,38 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" + "fmt" "io" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/patroni" + "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// +kubebuilder:rbac:groups="",resources=configmaps,verbs=create;patch +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={create,patch} // reconcileClusterConfigMap writes the ConfigMap that contains generated // files (etc) that apply to the entire cluster. 
func (r *Reconciler) reconcileClusterConfigMap( ctx context.Context, cluster *v1beta1.PostgresCluster, pgHBAs postgres.HBAs, pgParameters postgres.Parameters, -) (*v1.ConfigMap, error) { - clusterConfigMap := &v1.ConfigMap{ObjectMeta: naming.ClusterConfigMap(cluster)} - clusterConfigMap.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("ConfigMap")) +) (*corev1.ConfigMap, error) { + clusterConfigMap := &corev1.ConfigMap{ObjectMeta: naming.ClusterConfigMap(cluster)} + clusterConfigMap.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) err := errors.WithStack(r.setControllerReference(cluster, clusterConfigMap)) @@ -62,15 +53,15 @@ func (r *Reconciler) reconcileClusterConfigMap( return clusterConfigMap, err } -// +kubebuilder:rbac:groups="",resources=services,verbs=create;patch +// +kubebuilder:rbac:groups="",resources="services",verbs={create,patch} // reconcileClusterPodService writes the Service that can provide stable DNS // names to Pods related to cluster. func (r *Reconciler) reconcileClusterPodService( ctx context.Context, cluster *v1beta1.PostgresCluster, -) (*v1.Service, error) { - clusterPodService := &v1.Service{ObjectMeta: naming.ClusterPodService(cluster)} - clusterPodService.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Service")) +) (*corev1.Service, error) { + clusterPodService := &corev1.Service{ObjectMeta: naming.ClusterPodService(cluster)} + clusterPodService.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) err := errors.WithStack(r.setControllerReference(cluster, clusterPodService)) @@ -85,7 +76,7 @@ func (r *Reconciler) reconcileClusterPodService( // this allows a properly configured Pod to get a DNS record based on its name. // - https://docs.k8s.io/concepts/services-networking/service/#headless-services // - https://docs.k8s.io/concepts/services-networking/dns-pod-service/#pods - clusterPodService.Spec.ClusterIP = v1.ClusterIPNone + clusterPodService.Spec.ClusterIP = corev1.ClusterIPNone clusterPodService.Spec.PublishNotReadyAddresses = true clusterPodService.Spec.Selector = map[string]string{ naming.LabelCluster: cluster.Name, @@ -149,20 +140,6 @@ func (r *Reconciler) generateClusterPrimaryService( TargetPort: intstr.FromString(naming.PortPostgreSQL), }} - // Copy the LoadBalancerStatus of the leader Service into external fields. - // These fields are presented in the "External-IP" field of `kubectl get`. - // - https://releases.k8s.io/v1.18.0/pkg/printers/internalversion/printers.go#L1046 - // - https://releases.k8s.io/v1.22.0/pkg/printers/internalversion/printers.go#L1110 - if leader.Spec.Type == corev1.ServiceTypeLoadBalancer { - for _, ingress := range leader.Status.LoadBalancer.Ingress { - service.Spec.ExternalIPs = append(service.Spec.ExternalIPs, ingress.IP) - - if service.Spec.ExternalName == "" && ingress.Hostname != "" { - service.Spec.ExternalName = ingress.Hostname - } - } - } - // Resolve to the ClusterIP for which Patroni has configured the Endpoints. endpoints.Subsets = []corev1.EndpointSubset{{ Addresses: []corev1.EndpointAddress{{IP: leader.Spec.ClusterIP}}, @@ -193,7 +170,7 @@ func (r *Reconciler) generateClusterPrimaryService( // to the PostgreSQL primary instance. 
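For orientation, the headless pod Service assembled above reduces to the following shape. The cluster label key matches the one used throughout this diff; the Service name and the helper are illustrative, not the operator's generated object.

```go
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// headlessPodService sketches a Service that gives each selected Pod a DNS
// record: no ClusterIP is allocated, and not-ready Pods are still published
// so members can resolve each other as soon as they exist.
func headlessPodService(namespace, clusterName string) *corev1.Service {
	svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{
		Namespace: namespace,
		Name:      clusterName + "-pods", // hypothetical; the operator uses its naming package
	}}
	svc.Spec.ClusterIP = corev1.ClusterIPNone
	svc.Spec.PublishNotReadyAddresses = true
	svc.Spec.Selector = map[string]string{
		"postgres-operator.crunchydata.com/cluster": clusterName,
	}
	return svc
}
```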
func (r *Reconciler) reconcileClusterPrimaryService( ctx context.Context, cluster *v1beta1.PostgresCluster, leader *corev1.Service, -) error { +) (*corev1.Service, error) { service, endpoints, err := r.generateClusterPrimaryService(cluster, leader) if err == nil { @@ -202,7 +179,7 @@ func (r *Reconciler) reconcileClusterPrimaryService( if err == nil { err = errors.WithStack(r.apply(ctx, endpoints)) } - return err + return service, err } // generateClusterReplicaService returns a v1.Service that exposes PostgreSQL @@ -213,33 +190,66 @@ func (r *Reconciler) generateClusterReplicaService( service := &corev1.Service{ObjectMeta: naming.ClusterReplicaService(cluster)} service.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) - service.Annotations = naming.Merge( - cluster.Spec.Metadata.GetAnnotationsOrNil()) + service.Annotations = cluster.Spec.Metadata.GetAnnotationsOrNil() + service.Labels = cluster.Spec.Metadata.GetLabelsOrNil() + + if spec := cluster.Spec.ReplicaService; spec != nil { + service.Annotations = naming.Merge(service.Annotations, + spec.Metadata.GetAnnotationsOrNil()) + service.Labels = naming.Merge(service.Labels, + spec.Metadata.GetLabelsOrNil()) + } + + // add our labels last so they aren't overwritten service.Labels = naming.Merge( - cluster.Spec.Metadata.GetLabelsOrNil(), + service.Labels, map[string]string{ naming.LabelCluster: cluster.Name, naming.LabelRole: naming.RoleReplica, }) - // Allocate an IP address and let Kubernetes manage the Endpoints by - // selecting Pods with the Patroni replica role. - // - https://docs.k8s.io/concepts/services-networking/service/#defining-a-service - service.Spec.Type = corev1.ServiceTypeClusterIP - service.Spec.Selector = map[string]string{ - naming.LabelCluster: cluster.Name, - naming.LabelRole: naming.RolePatroniReplica, - } - // The TargetPort must be the name (not the number) of the PostgreSQL // ContainerPort. This name allows the port number to differ between Pods, // which can happen during a rolling update. - service.Spec.Ports = []corev1.ServicePort{{ + servicePort := corev1.ServicePort{ Name: naming.PortPostgreSQL, Port: *cluster.Spec.Port, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromString(naming.PortPostgreSQL), - }} + } + + // Default to a service type of ClusterIP + service.Spec.Type = corev1.ServiceTypeClusterIP + + // Check user provided spec for a specified type + if spec := cluster.Spec.ReplicaService; spec != nil { + service.Spec.Type = corev1.ServiceType(spec.Type) + if spec.NodePort != nil { + if service.Spec.Type == corev1.ServiceTypeClusterIP { + // The NodePort can only be set when the Service type is NodePort or + // LoadBalancer. However, due to a known issue prior to Kubernetes + // 1.20, we clear these errors during our apply. To preserve the + // appropriate behavior, we log an Event and return an error. 
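The guard described in this comment enforces a Kubernetes rule: nodePort is only meaningful for NodePort and LoadBalancer Services. The same check, extracted into a standalone sketch:

```go
package sketch

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// validateNodePort mirrors the guard used for the replica Service: a
// user-supplied nodePort is rejected when the requested type is ClusterIP.
func validateNodePort(serviceType corev1.ServiceType, nodePort *int32, name string) error {
	if nodePort != nil && serviceType == corev1.ServiceTypeClusterIP {
		return fmt.Errorf("NodePort cannot be set with type ClusterIP on Service %q", name)
	}
	return nil
}
```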
+ // TODO(tjmoore4): Once Validation Rules are available, this check + // and event could potentially be removed in favor of that validation + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "MisconfiguredClusterIP", + "NodePort cannot be set with type ClusterIP on Service %q", service.Name) + return nil, fmt.Errorf("NodePort cannot be set with type ClusterIP on Service %q", service.Name) + } + servicePort.NodePort = *spec.NodePort + } + service.Spec.ExternalTrafficPolicy = initialize.FromPointer(spec.ExternalTrafficPolicy) + service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy + } + service.Spec.Ports = []corev1.ServicePort{servicePort} + + // Allocate an IP address and let Kubernetes manage the Endpoints by + // selecting Pods with the Patroni replica role. + // - https://docs.k8s.io/concepts/services-networking/service/#defining-a-service + service.Spec.Selector = map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RolePatroniReplica, + } err := errors.WithStack(r.setControllerReference(cluster, service)) @@ -252,22 +262,29 @@ func (r *Reconciler) generateClusterReplicaService( // replica instances. func (r *Reconciler) reconcileClusterReplicaService( ctx context.Context, cluster *v1beta1.PostgresCluster, -) error { +) (*corev1.Service, error) { service, err := r.generateClusterReplicaService(cluster) if err == nil { err = errors.WithStack(r.apply(ctx, service)) } - return err + return service, err } // reconcileDataSource is responsible for reconciling the data source for a PostgreSQL cluster. // This involves ensuring the PostgreSQL data directory for the cluster is properly populated // prior to bootstrapping the cluster, specifically according to any data source configured in the // PostgresCluster spec. +// TODO(benjaminjb): Right now the spec will accept a dataSource with both a PostgresCluster and +// a PGBackRest section, but the code will only honor the PostgresCluster in that case; this would +// be better handled with a webhook to reject a spec with both `dataSource.postgresCluster` and +// `dataSource.pgbackrest` fields func (r *Reconciler) reconcileDataSource(ctx context.Context, cluster *v1beta1.PostgresCluster, observed *observedInstances, - clusterVolumes []v1.PersistentVolumeClaim) (bool, error) { + clusterVolumes []corev1.PersistentVolumeClaim, + rootCA *pki.RootCertificateAuthority, + backupsSpecFound bool, +) (bool, error) { // a hash func to hash the pgBackRest restore options hashFunc := func(jobConfigs []string) (string, error) { @@ -288,7 +305,8 @@ func (r *Reconciler) reconcileDataSource(ctx context.Context, // determine if the user wants to initialize the PG data directory postgresDataInitRequested := cluster.Spec.DataSource != nil && - cluster.Spec.DataSource.PostgresCluster != nil + (cluster.Spec.DataSource.PostgresCluster != nil || + cluster.Spec.DataSource.PGBackRest != nil) // determine if the user has requested an in-place restore restoreID := cluster.GetAnnotations()[naming.PGBackRestRestore] @@ -301,6 +319,7 @@ func (r *Reconciler) reconcileDataSource(ctx context.Context, // in place (and therefore recreating the data directory). If the user hasn't requested // PG data initialization or an in-place restore, then simply return. 
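The restore flow keys on a hash of the data source options so that a change to any option is detected against the existing restore Job. The operator's actual hash helper is not shown in this hunk; an FNV-based stand-in illustrates the idea:

```go
package main

import (
	"fmt"
	"hash/fnv"
	"strings"
)

// configHash is an illustrative stand-in for the elided hashFunc: any change
// to the ordered list of data source options yields a different identifier.
func configHash(configs []string) string {
	h := fnv.New32a()
	_, _ = h.Write([]byte(strings.Join(configs, "\n")))
	return fmt.Sprintf("%08x", h.Sum32())
}

func main() {
	fmt.Println(configHash([]string{"source-cluster", "repo1", "--type=time"}))
	fmt.Println(configHash([]string{"source-cluster", "repo1", "--type=immediate"}))
}
```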
var dataSource *v1beta1.PostgresClusterDataSource + var cloudDataSource *v1beta1.PGBackRestDataSource switch { case restoreInPlaceRequested: dataSource = cluster.Spec.Backups.PGBackRest.Restore.PostgresClusterDataSource @@ -309,6 +328,9 @@ func (r *Reconciler) reconcileDataSource(ctx context.Context, // restore ID for bootstrap restoreID = "~pgo-bootstrap-" + cluster.GetName() dataSource = cluster.Spec.DataSource.PostgresCluster + if dataSource == nil { + cloudDataSource = cluster.Spec.DataSource.PGBackRest + } default: return false, nil } @@ -341,8 +363,15 @@ func (r *Reconciler) reconcileDataSource(ctx context.Context, // calculate the configHash for the options in the current data source, and if an existing // restore Job exists, determine if the config has changed - configs := []string{dataSource.ClusterName, dataSource.RepoName} - configs = append(configs, dataSource.Options...) + var configs []string + switch { + case dataSource != nil: + configs = []string{dataSource.ClusterName, dataSource.RepoName} + configs = append(configs, dataSource.Options...) + case cloudDataSource != nil: + configs = []string{cloudDataSource.Stanza, cloudDataSource.Repo.Name} + configs = append(configs, cloudDataSource.Options...) + } configHash, err := hashFunc(configs) if err != nil { return false, errors.WithStack(err) @@ -375,9 +404,18 @@ func (r *Reconciler) reconcileDataSource(ctx context.Context, } // proceed with initializing the PG data directory if not already initialized - if err := r.reconcilePostgresClusterDataSource(ctx, cluster, dataSource, - configHash, clusterVolumes); err != nil { - return true, err + switch { + case dataSource != nil: + if err := r.reconcilePostgresClusterDataSource(ctx, cluster, dataSource, + configHash, clusterVolumes, rootCA, + backupsSpecFound); err != nil { + return true, err + } + case cloudDataSource != nil: + if err := r.reconcileCloudBasedDataSource(ctx, cluster, cloudDataSource, + configHash, clusterVolumes); err != nil { + return true, err + } } // return early until the PG data directory is initialized return true, nil diff --git a/internal/controller/postgrescluster/cluster_test.go b/internal/controller/postgrescluster/cluster_test.go index f594429b6c..be9e371a56 100644 --- a/internal/controller/postgrescluster/cluster_test.go +++ b/internal/controller/postgrescluster/cluster_test.go @@ -1,58 +1,42 @@ -// +build envtest +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - import ( "context" - "os" - "strings" "testing" - "time" - "github.com/crunchydata/postgres-operator/internal/initialize" - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" "github.com/pkg/errors" "go.opentelemetry.io/otel" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) var gvks = []schema.GroupVersionKind{{ - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, Kind: "ConfigMapList", }, { - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, Kind: "SecretList", }, { Group: appsv1.SchemeGroupVersion.Group, @@ -63,24 +47,24 @@ var gvks = []schema.GroupVersionKind{{ Version: appsv1.SchemeGroupVersion.Version, Kind: "DeploymentList", }, { - Group: batchv1beta1.SchemeGroupVersion.Group, - Version: batchv1beta1.SchemeGroupVersion.Version, + Group: batchv1.SchemeGroupVersion.Group, + Version: batchv1.SchemeGroupVersion.Version, Kind: "CronJobList", }, { - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, Kind: "PersistentVolumeClaimList", }, { - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, Kind: "ServiceList", }, { - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, Kind: "EndpointsList", }, { - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, Kind: "ServiceAccountList", }, { Group: rbacv1.SchemeGroupVersion.Group, @@ -93,27 +77,18 @@ var gvks = []schema.GroupVersionKind{{ }} func TestCustomLabels(t *testing.T) { - t.Parallel() - - env, cc, config := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) - - reconciler := &Reconciler{} - ctx, cancel := setupManager(t, config, func(mgr manager.Manager) { - reconciler = &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(t.Name()), - } - }) - t.Cleanup(func() { teardownManager(cancel, t) }) + ctx := context.Background() 
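The reconciler constructed just below records events through client-go's FakeRecorder instead of a manager-provided recorder; new(record.FakeRecorder) simply discards events, while NewFakeRecorder also exposes them on a channel for assertions. A short sketch of the latter:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	// NewFakeRecorder buffers events on a channel sized for what a test expects.
	recorder := record.NewFakeRecorder(1)

	pod := &corev1.Pod{}
	recorder.Eventf(pod, corev1.EventTypeWarning, "Example", "something happened to %q", "demo")

	// Each recorded event arrives as a preformatted string.
	fmt.Println(<-recorder.Events)
}
```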
+ _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 2) + + reconciler := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + Recorder: new(record.FakeRecorder), + Tracer: otel.Tracer(t.Name()), + } - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - ns.Labels = labels.Set{"postgres-operator-test": t.Name()} - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, ns)) }) + ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) @@ -151,7 +126,7 @@ func TestCustomLabels(t *testing.T) { labels["resource"] = resource.GetLabels() labels["podTemplate"] = resource.Spec.Template.GetLabels() case "CronJob": - var resource batchv1beta1.CronJob + var resource batchv1.CronJob err = runtime.DefaultUnstructuredConverter. FromUnstructured(u.UnstructuredContent(), &resource) labels["resource"] = resource.GetLabels() @@ -170,11 +145,11 @@ func TestCustomLabels(t *testing.T) { cluster.ObjectMeta.Namespace = ns.Name cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ Name: "daisy-instance1", - Replicas: Int32(1), + Replicas: initialize.Int32(1), DataVolumeClaimSpec: testVolumeClaimSpec(), }, { Name: "daisy-instance2", - Replicas: Int32(1), + Replicas: initialize.Int32(1), DataVolumeClaimSpec: testVolumeClaimSpec(), }} cluster.Spec.Metadata = &v1beta1.Metadata{ @@ -220,14 +195,14 @@ func TestCustomLabels(t *testing.T) { cluster.ObjectMeta.Namespace = ns.Name cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ Name: "max-instance", - Replicas: Int32(1), + Replicas: initialize.Int32(1), DataVolumeClaimSpec: testVolumeClaimSpec(), Metadata: &v1beta1.Metadata{ Labels: map[string]string{"my.instance.label": "max"}, }, }, { Name: "lucy-instance", - Replicas: Int32(1), + Replicas: initialize.Int32(1), DataVolumeClaimSpec: testVolumeClaimSpec(), Metadata: &v1beta1.Metadata{ Labels: map[string]string{"my.instance.label": "lucy"}, @@ -355,27 +330,18 @@ func TestCustomLabels(t *testing.T) { } func TestCustomAnnotations(t *testing.T) { - t.Parallel() - - env, cc, config := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) - - reconciler := &Reconciler{} - ctx, cancel := setupManager(t, config, func(mgr manager.Manager) { - reconciler = &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(t.Name()), - } - }) - t.Cleanup(func() { teardownManager(cancel, t) }) + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 2) + + reconciler := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + Recorder: new(record.FakeRecorder), + Tracer: otel.Tracer(t.Name()), + } - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - ns.Labels = labels.Set{"postgres-operator-test": ""} - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, ns)) }) + ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) @@ -413,7 +379,7 @@ func TestCustomAnnotations(t *testing.T) { annotations["resource"] = resource.GetAnnotations() annotations["podTemplate"] = resource.Spec.Template.GetAnnotations() case "CronJob": - var resource batchv1beta1.CronJob + var resource batchv1.CronJob err = 
runtime.DefaultUnstructuredConverter. FromUnstructured(u.UnstructuredContent(), &resource) annotations["resource"] = resource.GetAnnotations() @@ -432,11 +398,11 @@ func TestCustomAnnotations(t *testing.T) { cluster.ObjectMeta.Namespace = ns.Name cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ Name: "daisy-instance1", - Replicas: Int32(1), + Replicas: initialize.Int32(1), DataVolumeClaimSpec: testVolumeClaimSpec(), }, { Name: "daisy-instance2", - Replicas: Int32(1), + Replicas: initialize.Int32(1), DataVolumeClaimSpec: testVolumeClaimSpec(), }} cluster.Spec.Metadata = &v1beta1.Metadata{ @@ -483,14 +449,14 @@ func TestCustomAnnotations(t *testing.T) { cluster.ObjectMeta.Namespace = ns.Name cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ Name: "max-instance", - Replicas: Int32(1), + Replicas: initialize.Int32(1), DataVolumeClaimSpec: testVolumeClaimSpec(), Metadata: &v1beta1.Metadata{ Annotations: map[string]string{"my.instance.annotation": "max"}, }, }, { Name: "lucy-instance", - Replicas: Int32(1), + Replicas: initialize.Int32(1), DataVolumeClaimSpec: testVolumeClaimSpec(), Metadata: &v1beta1.Metadata{ Annotations: map[string]string{"my.instance.annotation": "lucy"}, @@ -617,96 +583,9 @@ func TestCustomAnnotations(t *testing.T) { }) } -func TestContainerSecurityContext(t *testing.T) { - if !strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { - t.Skip("Test requires pods to be created") - } - - t.Parallel() - - env, cc, config := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) - - reconciler := &Reconciler{} - ctx, cancel := setupManager(t, config, func(mgr manager.Manager) { - reconciler = &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(t.Name()), - } - podExec, err := newPodExecutor(config) - assert.NilError(t, err) - reconciler.PodExec = podExec - }) - t.Cleanup(func() { teardownManager(cancel, t) }) - - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - ns.Labels = labels.Set{"postgres-operator-test": ""} - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, ns)) }) - - cluster := testCluster() - cluster.Namespace = ns.Name - - assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) - t.Cleanup(func() { - // Remove finalizers, if any, so the namespace can terminate. 
- assert.Check(t, client.IgnoreNotFound( - reconciler.Client.Patch(ctx, cluster, client.RawPatch( - client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) - }) - - pods := &corev1.PodList{} - assert.NilError(t, wait.Poll(time.Second, Scale(2*time.Minute), func() (bool, error) { - // Reconcile the cluster - result, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: client.ObjectKeyFromObject(cluster), - }) - if err != nil { - return false, err - } - if result.Requeue { - return false, nil - } - - err = reconciler.Client.List(ctx, pods, - client.InNamespace(ns.Name), - client.MatchingLabels{ - naming.LabelCluster: cluster.Name, - }) - if err != nil { - return false, err - } - - // Can expect 4 pods from a cluster - // instance, repo-host, pgbouncer, backup(s) - if len(pods.Items) < 4 { - return false, nil - } - return true, nil - })) - - // Once we have a pod list with pods of each type, check that the - // pods containers have the expected Security Context options - for _, pod := range pods.Items { - for _, container := range pod.Spec.Containers { - assert.Equal(t, *container.SecurityContext.Privileged, false) - assert.Equal(t, *container.SecurityContext.ReadOnlyRootFilesystem, true) - assert.Equal(t, *container.SecurityContext.AllowPrivilegeEscalation, false) - } - for _, initContainer := range pod.Spec.InitContainers { - assert.Equal(t, *initContainer.SecurityContext.Privileged, false) - assert.Equal(t, *initContainer.SecurityContext.ReadOnlyRootFilesystem, true) - assert.Equal(t, *initContainer.SecurityContext.AllowPrivilegeEscalation, false) - } - } -} - func TestGenerateClusterPrimaryService(t *testing.T) { - env, cc, _ := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) reconciler := &Reconciler{Client: cc} @@ -722,11 +601,11 @@ func TestGenerateClusterPrimaryService(t *testing.T) { assert.ErrorContains(t, err, "not implemented") alwaysExpect := func(t testing.TB, service *corev1.Service, endpoints *corev1.Endpoints) { - assert.Assert(t, marshalMatches(service.TypeMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: pg5 @@ -741,7 +620,7 @@ ownerReferences: name: pg5 uid: "" `)) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: postgres port: 2600 protocol: TCP @@ -752,7 +631,7 @@ ownerReferences: assert.Assert(t, service.Spec.Selector == nil, "got %v", service.Spec.Selector) - assert.Assert(t, marshalMatches(endpoints, ` + assert.Assert(t, cmp.MarshalMatches(endpoints, ` apiVersion: v1 kind: Endpoints metadata: @@ -796,47 +675,39 @@ subsets: assert.NilError(t, err) alwaysExpect(t, service, endpoints) - assert.DeepEqual(t, service.Spec.ExternalIPs, []string{ - "55.44.33.22", "99.88.77.66", "1.2.3.4", - }) - assert.Equal(t, service.Spec.ExternalName, "some.host") + // generateClusterPrimaryService no longer sets ExternalIPs or ExternalName from + // LoadBalancer-type leader service + // - https://cloud.google.com/anthos/clusters/docs/security-bulletins#gcp-2020-015 + assert.Equal(t, len(service.Spec.ExternalIPs), 0) + assert.Equal(t, service.Spec.ExternalName, "") }) } func TestReconcileClusterPrimaryService(t *testing.T) { ctx := context.Background() - env, 
cc, _ := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) - - ns := &corev1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, ns)) }) + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} - cluster := &v1beta1.PostgresCluster{} - cluster.Namespace = ns.Name - cluster.Name = "pg8" - cluster.Spec.PostgresVersion = 12 - cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{}} - + cluster := testCluster() + cluster.Namespace = setupNamespace(t, cc).Name assert.NilError(t, cc.Create(ctx, cluster)) - assert.ErrorContains(t, - reconciler.reconcileClusterPrimaryService(ctx, cluster, nil), - "not implemented") + _, err := reconciler.reconcileClusterPrimaryService(ctx, cluster, nil) + assert.ErrorContains(t, err, "not implemented") leader := &corev1.Service{} leader.Spec.ClusterIP = "192.0.2.10" - assert.NilError(t, - reconciler.reconcileClusterPrimaryService(ctx, cluster, leader)) + service, err := reconciler.reconcileClusterPrimaryService(ctx, cluster, leader) + assert.NilError(t, err) + assert.Assert(t, service != nil && service.UID != "", "expected created service") } func TestGenerateClusterReplicaServiceIntent(t *testing.T) { - env, cc, _ := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) reconciler := &Reconciler{Client: cc} @@ -848,11 +719,12 @@ func TestGenerateClusterReplicaServiceIntent(t *testing.T) { service, err := reconciler.generateClusterReplicaService(cluster) assert.NilError(t, err) - assert.Assert(t, marshalMatches(service.TypeMeta, ` + alwaysExpect := func(t testing.TB, service *corev1.Service) { + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service - `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + `)) + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: pg2 @@ -866,8 +738,11 @@ ownerReferences: kind: PostgresCluster name: pg2 uid: "" - `)) - assert.Assert(t, marshalMatches(service.Spec, ` + `)) + } + + alwaysExpect(t, service) + assert.Assert(t, cmp.MarshalMatches(service.Spec, ` ports: - name: postgres port: 9876 @@ -879,6 +754,39 @@ selector: type: ClusterIP `)) + types := []struct { + Type string + Expect func(testing.TB, *corev1.Service) + }{ + {Type: "ClusterIP", Expect: func(t testing.TB, service *corev1.Service) { + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeClusterIP) + }}, + {Type: "NodePort", Expect: func(t testing.TB, service *corev1.Service) { + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeNodePort) + }}, + {Type: "LoadBalancer", Expect: func(t testing.TB, service *corev1.Service) { + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeLoadBalancer) + }}, + } + + for _, test := range types { + t.Run(test.Type, func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.ReplicaService = &v1beta1.ServiceSpec{Type: test.Type} + + service, err := reconciler.generateClusterReplicaService(cluster) + assert.NilError(t, err) + alwaysExpect(t, service) + test.Expect(t, service) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: postgres + port: 9876 + protocol: TCP + targetPort: postgres + `)) + }) + } + t.Run("AnnotationsLabels", func(t *testing.T) { cluster := cluster.DeepCopy() 
cluster.Spec.Metadata = &v1beta1.Metadata{ @@ -890,19 +798,19 @@ type: ClusterIP assert.NilError(t, err) // Annotations present in the metadata. - assert.Assert(t, marshalMatches(service.ObjectMeta.Annotations, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta.Annotations, ` some: note `)) // Labels present in the metadata. - assert.Assert(t, marshalMatches(service.ObjectMeta.Labels, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta.Labels, ` happy: label postgres-operator.crunchydata.com/cluster: pg2 postgres-operator.crunchydata.com/role: replica `)) // Labels not in the selector. - assert.Assert(t, marshalMatches(service.Spec.Selector, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Selector, ` postgres-operator.crunchydata.com/cluster: pg2 postgres-operator.crunchydata.com/role: replica `)) diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go index ca4d616944..d459d30a10 100644 --- a/internal/controller/postgrescluster/controller.go +++ b/internal/controller/postgrescluster/controller.go @@ -1,77 +1,72 @@ -package postgrescluster - -/* -Copyright 2021 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ +package postgrescluster import ( "context" + "errors" "fmt" "io" + "time" - "github.com/pkg/errors" "go.opentelemetry.io/otel/trace" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/discovery" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/pgaudit" "github.com/crunchydata/postgres-operator/internal/pgbackrest" "github.com/crunchydata/postgres-operator/internal/pgbouncer" "github.com/crunchydata/postgres-operator/internal/pgmonitor" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) const ( // ControllerName is the name of the PostgresCluster controller ControllerName = "postgrescluster-controller" - - // workerCount defines the number of worker queues for the PostgresCluster controller - workerCount = 2 ) // Reconciler holds resources for the PostgresCluster reconciler type Reconciler struct { - Client client.Client - Owner client.FieldOwner - Recorder record.EventRecorder - Tracer trace.Tracer - IsOpenShift bool - - PodExec func( - namespace, pod, container string, + Client client.Client + DiscoveryClient *discovery.DiscoveryClient + IsOpenShift bool + Owner client.FieldOwner + PodExec func( + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error + Recorder record.EventRecorder + Registration registration.Registration + Tracer trace.Tracer } -// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch -// +kubebuilder:rbac:groups=postgres-operator.crunchydata.com,resources=postgresclusters,verbs=get;list;watch -// +kubebuilder:rbac:groups=postgres-operator.crunchydata.com,resources=postgresclusters/status,verbs=patch +// +kubebuilder:rbac:groups="",resources="events",verbs={create,patch} +// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,list,watch} +// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters/status",verbs={patch} // Reconcile reconciles a ConfigMap in a namespace managed by the PostgreSQL Operator func (r *Reconciler) Reconcile( @@ -81,15 +76,6 @@ func (r *Reconciler) Reconcile( log := logging.FromContext(ctx) defer span.End() - // create the result that will be updated following a call to each reconciler - result := reconcile.Result{} - updateResult := func(next reconcile.Result, 
err error) error { - if err == nil { - result = updateReconcileResult(result, next) - } - return err - } - // get the postgrescluster from the cache cluster := &v1beta1.PostgresCluster{} if err := r.Client.Get(ctx, request.NamespacedName, cluster); err != nil { @@ -100,7 +86,7 @@ func (r *Reconciler) Reconcile( log.Error(err, "unable to fetch PostgresCluster") span.RecordError(err) } - return result, err + return runtime.ErrorWithBackoff(err) } // Set any defaults that may not have been stored in the API. No DeepCopy @@ -126,49 +112,119 @@ func (r *Reconciler) Reconcile( if result, err := r.handleDelete(ctx, cluster); err != nil { span.RecordError(err) log.Error(err, "deleting") - return reconcile.Result{}, err + return runtime.ErrorWithBackoff(err) } else if result != nil { if log := log.V(1); log.Enabled() { - if result.RequeueAfter > 0 { - // RequeueAfter implies Requeue, but set both to make the next - // log message more clear. - result.Requeue = true - } log.Info("deleting", "result", fmt.Sprintf("%+v", *result)) } return *result, nil } + // Perform initial validation on a cluster + // TODO: Move this to a defaulting (mutating admission) webhook + // to leverage regular validation. + + // verify all needed image values are defined + if err := config.VerifyImageValues(cluster); err != nil { + // warning event with missing image information + r.Recorder.Event(cluster, corev1.EventTypeWarning, "MissingRequiredImage", + err.Error()) + // specifically allow reconciliation if the cluster is shutdown to + // facilitate upgrades, otherwise return + if !initialize.FromPointer(cluster.Spec.Shutdown) { + return runtime.ErrorWithBackoff(err) + } + } + + if cluster.Spec.Standby != nil && + cluster.Spec.Standby.Enabled && + cluster.Spec.Standby.Host == "" && + cluster.Spec.Standby.RepoName == "" { + // When a standby cluster is requested but a repoName or host is not provided + // the cluster will be created as a non-standby. Reject any clusters with + // this configuration and provide an event + path := field.NewPath("spec", "standby") + err := field.Invalid(path, cluster.Name, "Standby requires a host or repoName to be enabled") + r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidStandbyConfiguration", err.Error()) + return runtime.ErrorWithBackoff(err) + } + var ( - clusterConfigMap *corev1.ConfigMap - clusterReplicationSecret *corev1.Secret - clusterPodService *corev1.Service - clusterVolumes []corev1.PersistentVolumeClaim - instanceServiceAccount *corev1.ServiceAccount - instances *observedInstances - patroniLeaderService *corev1.Service - primaryCertificate *corev1.SecretProjection - rootCA *pki.RootCertificateAuthority - monitoringSecret *corev1.Secret - err error + clusterConfigMap *corev1.ConfigMap + clusterReplicationSecret *corev1.Secret + clusterPodService *corev1.Service + clusterVolumes []corev1.PersistentVolumeClaim + instanceServiceAccount *corev1.ServiceAccount + instances *observedInstances + patroniLeaderService *corev1.Service + primaryCertificate *corev1.SecretProjection + primaryService *corev1.Service + replicaService *corev1.Service + rootCA *pki.RootCertificateAuthority + monitoringSecret *corev1.Secret + exporterQueriesConfig *corev1.ConfigMap + exporterWebConfig *corev1.ConfigMap + err error + backupsSpecFound bool + backupsReconciliationAllowed bool + dedicatedSnapshotPVC *corev1.PersistentVolumeClaim ) - // Define the function for the updating the PostgresCluster status. 
Returns any error that - // occurs while attempting to patch the status, while otherwise simply returning the - // Result and error variables that are populated while reconciling the PostgresCluster. - patchClusterStatus := func() (reconcile.Result, error) { + patchClusterStatus := func() error { if !equality.Semantic.DeepEqual(before.Status, cluster.Status) { // NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track // managed fields on the status subresource: https://issue.k8s.io/88901 - if err := errors.WithStack(r.Client.Status().Patch( - ctx, cluster, client.MergeFrom(before), r.Owner)); err != nil { + if err := r.Client.Status().Patch( + ctx, cluster, client.MergeFrom(before), r.Owner); err != nil { log.Error(err, "patching cluster status") - return result, err + return err } log.V(1).Info("patched cluster status") } - return result, err + return nil + } + + if r.Registration != nil && r.Registration.Required(r.Recorder, cluster, &cluster.Status.Conditions) { + registration.SetAdvanceWarning(r.Recorder, cluster, &cluster.Status.Conditions) + } + cluster.Status.RegistrationRequired = nil + cluster.Status.TokenRequired = "" + + // if the cluster is paused, set a condition and return + if cluster.Spec.Paused != nil && *cluster.Spec.Paused { + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: v1beta1.PostgresClusterProgressing, + Status: metav1.ConditionFalse, + Reason: "Paused", + Message: "No spec changes will be applied and no other statuses will be updated.", + + ObservedGeneration: cluster.GetGeneration(), + }) + return runtime.ErrorWithBackoff(patchClusterStatus()) + } else { + meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.PostgresClusterProgressing) + } + + if err == nil { + backupsSpecFound, backupsReconciliationAllowed, err = r.BackupsEnabled(ctx, cluster) + + // If we cannot reconcile because the backup reconciliation is paused, set a condition and exit + if !backupsReconciliationAllowed { + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: v1beta1.PostgresClusterProgressing, + Status: metav1.ConditionFalse, + Reason: "Paused", + Message: "Reconciliation is paused: please fill in spec.backups " + + "or add the postgres-operator.crunchydata.com/authorizeBackupRemoval " + + "annotation to authorize backup removal.", + + ObservedGeneration: cluster.GetGeneration(), + }) + return runtime.ErrorWithBackoff(patchClusterStatus()) + } else { + meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.PostgresClusterProgressing) + } } pgHBAs := postgres.NewHBAs() @@ -176,18 +232,49 @@ func (r *Reconciler) Reconcile( pgbouncer.PostgreSQL(cluster, &pgHBAs) pgParameters := postgres.NewParameters() - pgbackrest.PostgreSQL(cluster, &pgParameters) - + pgaudit.PostgreSQLParameters(&pgParameters) + pgbackrest.PostgreSQL(cluster, &pgParameters, backupsSpecFound) pgmonitor.PostgreSQLParameters(cluster, &pgParameters) + // Set huge_pages = try if a hugepages resource limit > 0, otherwise set "off" + postgres.SetHugePages(cluster, &pgParameters) + + if err == nil { + rootCA, err = r.reconcileRootCertificate(ctx, cluster) + } + + if err == nil { + // Since any existing data directories must be moved prior to bootstrapping the + // cluster, further reconciliation will not occur until the directory move Jobs + // (if configured) have completed. 
Func reconcileDirMoveJobs() will therefore + // return a bool indicating that the controller should return early while any + // required Jobs are running, after which it will indicate that an early + // return is no longer needed, and reconciliation can proceed normally. + returnEarly, err := r.reconcileDirMoveJobs(ctx, cluster) + if err != nil || returnEarly { + return runtime.ErrorWithBackoff(errors.Join(err, patchClusterStatus())) + } + } if err == nil { clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster) } + if err == nil { + clusterVolumes, err = r.configureExistingPVCs(ctx, cluster, clusterVolumes) + } if err == nil { instances, err = r.observeInstances(ctx, cluster) } + + result := reconcile.Result{} + + if err == nil { + var requeue time.Duration + if requeue, err = r.reconcilePatroniStatus(ctx, cluster, instances); err == nil && requeue > 0 { + result.RequeueAfter = requeue + } + } if err == nil { - err = updateResult(r.reconcilePatroniStatus(ctx, cluster, instances)) + err = r.reconcilePatroniSwitchover(ctx, cluster, instances) } // reconcile the Pod service before reconciling any data source in case it is necessary // to start Pods during data source reconciliation that require network connections (e.g. @@ -196,6 +283,13 @@ func (r *Reconciler) Reconcile( if err == nil { clusterPodService, err = r.reconcileClusterPodService(ctx, cluster) } + // reconcile the RBAC resources before reconciling any data source in case + // restore/move Job pods require the ServiceAccount to access any data source. + // e.g., we are restoring from an S3 source using an IAM for access + // - https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html + if err == nil { + instanceServiceAccount, err = r.reconcileRBACResources(ctx, cluster) + } // First handle reconciling any data source configured for the PostgresCluster. This includes // reconciling the data source defined to bootstrap a new cluster, as well as a reconciling // a data source to perform restore in-place and re-bootstrap the cluster. @@ -206,18 +300,14 @@ func (r *Reconciler) Reconcile( // the controller should return early while data initialization is in progress, after // which it will indicate that an early return is no longer needed, and reconciliation // can proceed normally. 
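// Editor's aside, not part of the patch: the early returns above combine the
// reconcile error with any error from patching the cluster status via the
// standard library's errors.Join (Go 1.20+). Join discards nil values, so the
// combined error is nil only when both operations succeed. A minimal,
// self-contained sketch:
package main

import (
	"errors"
	"fmt"
)

func main() {
	reconcileErr := errors.New("reconcile failed") // stand-in for a failed reconcile step
	var patchErr error                             // stand-in for a successful status patch

	fmt.Println(errors.Join(reconcileErr, patchErr)) // reconcile failed
	fmt.Println(errors.Join(nil, nil))               // <nil>
}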
- var returnEarly bool - returnEarly, err = r.reconcileDataSource(ctx, cluster, instances, clusterVolumes) + returnEarly, err := r.reconcileDataSource(ctx, cluster, instances, clusterVolumes, rootCA, backupsSpecFound) if err != nil || returnEarly { - return patchClusterStatus() + return runtime.ErrorWithBackoff(errors.Join(err, patchClusterStatus())) } } if err == nil { clusterConfigMap, err = r.reconcileClusterConfigMap(ctx, cluster, pgHBAs, pgParameters) } - if err == nil { - rootCA, err = r.reconcileRootCertificate(ctx, cluster) - } if err == nil { clusterReplicationSecret, err = r.reconcileReplicationSecret(ctx, cluster, rootCA) } @@ -225,16 +315,13 @@ func (r *Reconciler) Reconcile( patroniLeaderService, err = r.reconcilePatroniLeaderLease(ctx, cluster) } if err == nil { - err = r.reconcileClusterPrimaryService(ctx, cluster, patroniLeaderService) + primaryService, err = r.reconcileClusterPrimaryService(ctx, cluster, patroniLeaderService) } if err == nil { - err = r.reconcileClusterReplicaService(ctx, cluster) + replicaService, err = r.reconcileClusterReplicaService(ctx, cluster) } if err == nil { - primaryCertificate, err = r.reconcileClusterCertificate(ctx, rootCA, cluster) - } - if err == nil { - instanceServiceAccount, err = r.reconcileRBACResources(ctx, cluster) + primaryCertificate, err = r.reconcileClusterCertificate(ctx, rootCA, cluster, primaryService, replicaService) } if err == nil { err = r.reconcilePatroniDistributedConfiguration(ctx, cluster) @@ -245,11 +332,19 @@ func (r *Reconciler) Reconcile( if err == nil { monitoringSecret, err = r.reconcileMonitoringSecret(ctx, cluster) } + if err == nil { + exporterQueriesConfig, err = r.reconcileExporterQueriesConfig(ctx, cluster) + } + if err == nil { + exporterWebConfig, err = r.reconcileExporterWebConfig(ctx, cluster) + } if err == nil { err = r.reconcileInstanceSets( - ctx, cluster, clusterConfigMap, clusterReplicationSecret, - rootCA, clusterPodService, instanceServiceAccount, instances, - patroniLeaderService, primaryCertificate, clusterVolumes) + ctx, cluster, clusterConfigMap, clusterReplicationSecret, rootCA, + clusterPodService, instanceServiceAccount, instances, patroniLeaderService, + primaryCertificate, clusterVolumes, exporterQueriesConfig, exporterWebConfig, + backupsSpecFound, + ) } if err == nil { @@ -260,7 +355,20 @@ func (r *Reconciler) Reconcile( } if err == nil { - err = updateResult(r.reconcilePGBackRest(ctx, cluster, instances)) + var next reconcile.Result + if next, err = r.reconcilePGBackRest(ctx, cluster, + instances, rootCA, backupsSpecFound); err == nil && !next.IsZero() { + result.Requeue = result.Requeue || next.Requeue + if next.RequeueAfter > 0 { + result.RequeueAfter = next.RequeueAfter + } + } + } + if err == nil { + dedicatedSnapshotPVC, err = r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + } + if err == nil { + err = r.reconcileVolumeSnapshots(ctx, cluster, dedicatedSnapshotPVC) } if err == nil { err = r.reconcilePGBouncer(ctx, cluster, instances, primaryCertificate, rootCA) @@ -268,8 +376,17 @@ func (r *Reconciler) Reconcile( if err == nil { err = r.reconcilePGMonitor(ctx, cluster, instances, monitoringSecret) } - - // TODO reconcile pgadmin4 + if err == nil { + err = r.reconcileDatabaseInitSQL(ctx, cluster, instances) + } + if err == nil { + err = r.reconcilePGAdmin(ctx, cluster) + } + if err == nil { + // This is after [Reconciler.rolloutInstances] to ensure that recreating + // Pods takes precedence. 
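// Editor's aside, not part of the patch: a sketch of the result-merging logic
// used for reconcilePGBackRest above. The helper name mergeResults is
// hypothetical; the point is that a later requeue request must not drop an
// earlier one, and a non-zero RequeueAfter replaces the previous value.
package main

import (
	"fmt"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// mergeResults folds next into result, mirroring the inline logic above.
func mergeResults(result, next reconcile.Result) reconcile.Result {
	if !next.IsZero() {
		result.Requeue = result.Requeue || next.Requeue
		if next.RequeueAfter > 0 {
			result.RequeueAfter = next.RequeueAfter
		}
	}
	return result
}

func main() {
	a := reconcile.Result{RequeueAfter: time.Minute}
	b := reconcile.Result{Requeue: true}
	fmt.Printf("%+v\n", mergeResults(a, b)) // {Requeue:true RequeueAfter:1m0s}
}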
+ err = r.handlePatroniRestarts(ctx, cluster, instances) + } // at this point everything reconciled successfully, and we can update the // observedGeneration @@ -277,7 +394,7 @@ func (r *Reconciler) Reconcile( log.V(1).Info("reconciled cluster") - return patchClusterStatus() + return result, errors.Join(err, patchClusterStatus()) } // deleteControlled safely deletes object when it is controlled by cluster. @@ -312,7 +429,7 @@ func (r *Reconciler) patch( // creator of such a reference have either "delete" permission on the owner or // "update" permission on the owner's "finalizers" subresource. // - https://docs.k8s.io/reference/access-authn-authz/admission-controllers/ -// +kubebuilder:rbac:groups=postgres-operator.crunchydata.com,resources=postgresclusters/finalizers,verbs=update +// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters/finalizers",verbs={update} // setControllerReference sets owner as a Controller OwnerReference on controlled. // Only one OwnerReference can be a controller, so it returns an error if another @@ -331,24 +448,33 @@ func (r *Reconciler) setOwnerReference( return controllerutil.SetOwnerReference(owner, controlled, r.Client.Scheme()) } -// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch -// +kubebuilder:rbac:groups="",resources=endpoints,verbs=get;list;watch -// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch -// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch -// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch -// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch -// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch -// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={get,list,watch} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={get,list,watch} +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={get,list,watch} +// +kubebuilder:rbac:groups="",resources="secrets",verbs={get,list,watch} +// +kubebuilder:rbac:groups="",resources="services",verbs={get,list,watch} +// +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs={get,list,watch} +// +kubebuilder:rbac:groups="apps",resources="deployments",verbs={get,list,watch} +// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={get,list,watch} +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={get,list,watch} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="roles",verbs={get,list,watch} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={get,list,watch} +// +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={get,list,watch} +// +kubebuilder:rbac:groups="policy",resources="poddisruptionbudgets",verbs={get,list,watch} // SetupWithManager adds the PostgresCluster controller to the provided runtime manager func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { if r.PodExec == nil { var err error - r.PodExec, err = newPodExecutor(mgr.GetConfig()) + r.PodExec, err = 
runtime.NewPodExecutor(mgr.GetConfig()) + if err != nil { + return err + } + } + + if r.DiscoveryClient == nil { + var err error + r.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(mgr.GetConfig()) if err != nil { return err } @@ -356,9 +482,6 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { return builder.ControllerManagedBy(mgr). For(&v1beta1.PostgresCluster{}). - WithOptions(controller.Options{ - MaxConcurrentReconciles: workerCount, - }). Owns(&corev1.ConfigMap{}). Owns(&corev1.Endpoints{}). Owns(&corev1.PersistentVolumeClaim{}). @@ -370,9 +493,35 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { Owns(&batchv1.Job{}). Owns(&rbacv1.Role{}). Owns(&rbacv1.RoleBinding{}). - Owns(&batchv1beta1.CronJob{}). - Watches(&source.Kind{Type: &corev1.Pod{}}, r.watchPods()). - Watches(&source.Kind{Type: &appsv1.StatefulSet{}}, + Owns(&batchv1.CronJob{}). + Owns(&policyv1.PodDisruptionBudget{}). + Watches(&corev1.Pod{}, r.watchPods()). + Watches(&appsv1.StatefulSet{}, r.controllerRefHandlerFuncs()). // watch all StatefulSets Complete(r) } + +// GroupVersionKindExists checks to see whether a given Kind for a given +// GroupVersion exists in the Kubernetes API Server. +func (r *Reconciler) GroupVersionKindExists(groupVersion, kind string) (*bool, error) { + if r.DiscoveryClient == nil { + return initialize.Bool(false), nil + } + + resourceList, err := r.DiscoveryClient.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + if apierrors.IsNotFound(err) { + return initialize.Bool(false), nil + } + + return nil, err + } + + for _, resource := range resourceList.APIResources { + if resource.Kind == kind { + return initialize.Bool(true), nil + } + } + + return initialize.Bool(false), nil +} diff --git a/internal/controller/postgrescluster/controller_ref_manager.go b/internal/controller/postgrescluster/controller_ref_manager.go index 9acc752242..8c4a34189f 100644 --- a/internal/controller/postgrescluster/controller_ref_manager.go +++ b/internal/controller/postgrescluster/controller_ref_manager.go @@ -1,24 +1,13 @@ -package postgrescluster - -/* -Copyright 2021 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +package postgrescluster import ( "context" - kerr "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" @@ -109,7 +98,7 @@ func (r *Reconciler) claimObject(ctx context.Context, postgresCluster *v1beta1.P if err := r.adoptObject(ctx, postgresCluster, obj); err != nil { // If adopt attempt failed because the resource no longer exists, then simply // ignore. Otherwise return the error. 
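// Editor's aside, not part of the patch: a sketch, assumed to live in the same
// package, of how the GroupVersionKindExists helper added above could gate an
// optional reconcile path. The method name snapshotsAvailable and the use of
// the VolumeSnapshot group/version are illustrative assumptions only.
func (r *Reconciler) snapshotsAvailable() (bool, error) {
	exists, err := r.GroupVersionKindExists("snapshot.storage.k8s.io/v1", "VolumeSnapshot")
	if err != nil || exists == nil {
		return false, err
	}
	return *exists, nil
}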
- if kerr.IsNotFound(err) { + if apierrors.IsNotFound(err) { return nil } return err @@ -143,7 +132,7 @@ func (r *Reconciler) getPostgresClusterForObject(ctx context.Context, Name: clusterName, Namespace: obj.GetNamespace(), }, postgresCluster); err != nil { - if kerr.IsNotFound(err) { + if apierrors.IsNotFound(err) { return false, nil, nil } return false, nil, err @@ -192,23 +181,21 @@ func (r *Reconciler) releaseObject(ctx context.Context, // StatefulSets within the cluster as needed to manage controller ownership refs. func (r *Reconciler) controllerRefHandlerFuncs() *handler.Funcs { - // var err error - ctx := context.Background() - log := logging.FromContext(ctx) + log := logging.FromContext(context.Background()) errMsg := "managing StatefulSet controller refs" return &handler.Funcs{ - CreateFunc: func(updateEvent event.CreateEvent, workQueue workqueue.RateLimitingInterface) { + CreateFunc: func(ctx context.Context, updateEvent event.CreateEvent, workQueue workqueue.RateLimitingInterface) { if err := r.manageControllerRefs(ctx, updateEvent.Object); err != nil { log.Error(err, errMsg) } }, - UpdateFunc: func(updateEvent event.UpdateEvent, workQueue workqueue.RateLimitingInterface) { + UpdateFunc: func(ctx context.Context, updateEvent event.UpdateEvent, workQueue workqueue.RateLimitingInterface) { if err := r.manageControllerRefs(ctx, updateEvent.ObjectNew); err != nil { log.Error(err, errMsg) } }, - DeleteFunc: func(updateEvent event.DeleteEvent, workQueue workqueue.RateLimitingInterface) { + DeleteFunc: func(ctx context.Context, updateEvent event.DeleteEvent, workQueue workqueue.RateLimitingInterface) { if err := r.manageControllerRefs(ctx, updateEvent.Object); err != nil { log.Error(err, errMsg) } diff --git a/internal/controller/postgrescluster/controller_ref_manager_test.go b/internal/controller/postgrescluster/controller_ref_manager_test.go index 460956fbde..8543fe390d 100644 --- a/internal/controller/postgrescluster/controller_ref_manager_test.go +++ b/internal/controller/postgrescluster/controller_ref_manager_test.go @@ -1,86 +1,42 @@ -// +build envtest +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - import ( + "context" "testing" - "go.opentelemetry.io/otel" - "gotest.tools/v3/assert" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" + "github.com/crunchydata/postgres-operator/internal/testing/require" ) func TestManageControllerRefs(t *testing.T) { + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 1) - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - r := &Reconciler{} - ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { - r = &Reconciler{ - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(ControllerName), - Owner: ControllerName, - } - }) - t.Cleanup(func() { - teardownManager(cancel, t) - teardownTestEnv(t, tEnv) - }) - + ctx := context.Background() + r := &Reconciler{Client: tClient} clusterName := "hippo" - namespace := "postgres-operator-test-" + rand.String(6) - - ns := &corev1.Namespace{} - ns.Name = namespace - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) - // create a PostgresCluster to test with - postgresCluster := &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - Namespace: namespace, - }, - Spec: v1beta1.PostgresClusterSpec{ - PostgresVersion: 12, - InstanceSets: []v1beta1.PostgresInstanceSetSpec{{Name: "instance1"}}, - }, - } + cluster := testCluster() + cluster.Namespace = setupNamespace(t, tClient).Name // create the test PostgresCluster - if err := tClient.Create(ctx, postgresCluster); err != nil { + if err := tClient.Create(ctx, cluster); err != nil { t.Fatal(err) } // create a base StatefulSet that can be used by the various tests below objBase := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, + Namespace: cluster.Namespace, }, Spec: appsv1.StatefulSetSpec{ Selector: &metav1.LabelSelector{ @@ -97,7 +53,7 @@ func TestManageControllerRefs(t *testing.T) { t.Run("adopt Object", func(t *testing.T) { obj := objBase.DeepCopy() - obj.Name = "adpot" + obj.Name = "adopt" obj.Labels = map[string]string{naming.LabelCluster: clusterName} if err := r.Client.Create(ctx, obj); err != nil { @@ -115,7 +71,7 @@ func TestManageControllerRefs(t *testing.T) { var foundControllerOwnerRef bool for _, ref := range obj.GetOwnerReferences() { if *ref.Controller && *ref.BlockOwnerDeletion && - ref.UID == postgresCluster.GetUID() && + ref.UID == cluster.GetUID() && ref.Name == clusterName && ref.Kind == "PostgresCluster" { foundControllerOwnerRef = true break @@ -136,7 +92,7 @@ func TestManageControllerRefs(t *testing.T) { APIVersion: "group/version", Kind: "PostgresCluster", Name: clusterName, - UID: postgresCluster.GetUID(), + UID: cluster.GetUID(), Controller: &isTrue, BlockOwnerDeletion: &isTrue, }) @@ -185,7 +141,7 @@ func TestManageControllerRefs(t *testing.T) { obj := objBase.DeepCopy() obj.Name = "ignore-no-postgrescluster" - obj.Labels = map[string]string{naming.LabelCluster: "noexist"} + obj.Labels = map[string]string{naming.LabelCluster: "nonexistent"} if err := r.Client.Create(ctx, obj); err != nil { t.Error(err) diff --git 
a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index e234aad0bc..e6fdc5cb86 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -1,19 +1,6 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -23,57 +10,46 @@ import ( "strings" "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gstruct" "github.com/pkg/errors" + "go.opentelemetry.io/otel" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/registration" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestDeleteControlled(t *testing.T) { ctx := context.Background() - tEnv, cc, _ := setupTestEnv(t, t.Name()) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + ns := setupNamespace(t, cc) reconciler := Reconciler{Client: cc} - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - ns.Labels = map[string]string{"postgres-operator-test": t.Name()} - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, ns)) }) - - cluster := &v1beta1.PostgresCluster{} - assert.NilError(t, yaml.Unmarshal([]byte(`{ - spec: { - postgresVersion: 13, - instances: [{ - name: instance, - }], - }, - }`), cluster)) - + cluster := testCluster() cluster.Namespace = ns.Name cluster.Name = strings.ToLower(t.Name()) - cluster.Spec.Image = CrunchyPostgresHAImage - assert.NilError(t, cc.Create(ctx, cluster)) t.Run("NoOwnership", func(t *testing.T) { - secret := &v1.Secret{} + secret := &corev1.Secret{} secret.Namespace = ns.Name secret.Name = "solo" @@ -85,7 +61,7 @@ func TestDeleteControlled(t *testing.T) { }) t.Run("Owned", func(t *testing.T) { - secret := &v1.Secret{} + secret := &corev1.Secret{} secret.Namespace = ns.Name secret.Name = "owned" @@ -98,7 +74,7 @@ func TestDeleteControlled(t *testing.T) { }) t.Run("Controlled", func(t *testing.T) { - secret := &v1.Secret{} + secret := &corev1.Secret{} secret.Namespace = ns.Name secret.Name = "controlled" @@ -113,9 +89,37 @@ func TestDeleteControlled(t *testing.T) { }) } +var olmClusterYAML = ` 
+metadata: + name: olm +spec: + postgresVersion: 13 + image: postgres + instances: + - name: register-now + dataVolumeClaimSpec: + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + image: pgbackrest + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi +` + var _ = Describe("PostgresCluster Reconciler", func() { var test struct { - Namespace *v1.Namespace + Namespace *corev1.Namespace Reconciler Reconciler Recorder *record.FakeRecorder } @@ -123,7 +127,7 @@ var _ = Describe("PostgresCluster Reconciler", func() { BeforeEach(func() { ctx := context.Background() - test.Namespace = &v1.Namespace{} + test.Namespace = &corev1.Namespace{} test.Namespace.Name = "postgres-operator-test-" + rand.String(6) Expect(suite.Client.Create(ctx, test.Namespace)).To(Succeed()) @@ -133,6 +137,7 @@ var _ = Describe("PostgresCluster Reconciler", func() { test.Reconciler.Client = suite.Client test.Reconciler.Owner = "asdf" test.Reconciler.Recorder = test.Recorder + test.Reconciler.Registration = nil test.Reconciler.Tracer = otel.Tracer("asdf") }) @@ -173,6 +178,49 @@ var _ = Describe("PostgresCluster Reconciler", func() { return result } + Context("Cluster with Registration Requirement, no token", func() { + var cluster *v1beta1.PostgresCluster + + BeforeEach(func() { + test.Reconciler.Registration = registration.RegistrationFunc( + func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { + return true + }) + + cluster = create(olmClusterYAML) + Expect(reconcile(cluster)).To(BeZero()) + }) + + AfterEach(func() { + ctx := context.Background() + + if cluster != nil { + Expect(client.IgnoreNotFound( + suite.Client.Delete(ctx, cluster), + )).To(Succeed()) + + // Remove finalizers, if any, so the namespace can terminate. 
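// Editor's aside, not part of the patch: the registration test below reads
// warning events from a record.FakeRecorder, which delivers each event as a
// formatted string on a buffered channel. A minimal, self-contained example;
// the reason and message strings here are illustrative only.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	recorder := record.NewFakeRecorder(100) // buffer must hold every event the code under test emits

	pod := &corev1.Pod{} // any runtime.Object can serve as the event's subject
	recorder.Event(pod, corev1.EventTypeWarning, "ExampleReason", "this message is only an illustration")

	// With default settings, each event arrives as "<type> <reason> <message>".
	fmt.Println(<-recorder.Events) // Warning ExampleReason this message is only an illustration
}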
+ Expect(client.IgnoreNotFound( + suite.Client.Patch(ctx, cluster, client.RawPatch( + client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))), + )).To(Succeed()) + } + }) + + Specify("Cluster RegistrationRequired Status", func() { + existing := &v1beta1.PostgresCluster{} + Expect(suite.Client.Get( + context.Background(), client.ObjectKeyFromObject(cluster), existing, + )).To(Succeed()) + + Expect(meta.IsStatusConditionFalse(existing.Status.Conditions, v1beta1.Registered)).To(BeTrue()) + + event, ok := <-test.Recorder.Events + Expect(ok).To(BeTrue()) + Expect(event).To(ContainSubstring("Register Soon")) + }) + }) + Context("Cluster", func() { var cluster *v1beta1.PostgresCluster @@ -182,6 +230,7 @@ metadata: name: carlos spec: postgresVersion: 13 + image: postgres instances: - name: samba dataVolumeClaimSpec: @@ -190,6 +239,18 @@ spec: resources: requests: storage: 1Gi + backups: + pgbackrest: + image: pgbackrest + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi `) Expect(reconcile(cluster)).To(BeZero()) }) @@ -211,7 +272,7 @@ spec: }) Specify("Cluster ConfigMap", func() { - ccm := &v1.ConfigMap{} + ccm := &corev1.ConfigMap{} Expect(suite.Client.Get(context.Background(), client.ObjectKey{ Namespace: test.Namespace.Name, Name: "carlos-config", }, ccm)).To(Succeed()) @@ -235,7 +296,7 @@ spec: }) Specify("Cluster Pod Service", func() { - cps := &v1.Service{} + cps := &corev1.Service{} Expect(suite.Client.Get(context.Background(), client.ObjectKey{ Namespace: test.Namespace.Name, Name: "carlos-pods", }, cps)).To(Succeed()) @@ -282,26 +343,60 @@ spec: // // NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track // managed fields on the status subresource: https://issue.k8s.io/88901 - Expect(existing.ManagedFields).To(ContainElement( - MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), - "FieldsV1": PointTo(MatchAllFields(Fields{ - "Raw": WithTransform(func(in []byte) (out map[string]interface{}) { - Expect(yaml.Unmarshal(in, &out)).To(Succeed()) - return out - }, MatchAllKeys(Keys{ - "f:metadata": MatchAllKeys(Keys{ - "f:finalizers": Not(BeZero()), - }), - "f:status": Not(BeZero()), + switch { + case suite.ServerVersion.LessThan(version.MustParseGeneric("1.22")): + + // Kubernetes 1.22 began tracking subresources in managed fields. 
+ // - https://pr.k8s.io/100970 + Expect(existing.ManagedFields).To(ContainElement( + MatchFields(IgnoreExtras, Fields{ + "Manager": Equal(string(test.Reconciler.Owner)), + "FieldsV1": PointTo(MatchAllFields(Fields{ + "Raw": WithTransform(func(in []byte) (out map[string]interface{}) { + Expect(yaml.Unmarshal(in, &out)).To(Succeed()) + return out + }, MatchAllKeys(Keys{ + "f:metadata": MatchAllKeys(Keys{ + "f:finalizers": Not(BeZero()), + }), + "f:status": Not(BeZero()), + })), })), - })), - }), - ), `controller should manage only the "status" field`) + }), + ), `controller should manage only "finalizers" and "status"`) + + default: + Expect(existing.ManagedFields).To(ContainElements( + MatchFields(IgnoreExtras, Fields{ + "Manager": Equal(string(test.Reconciler.Owner)), + "FieldsV1": PointTo(MatchAllFields(Fields{ + "Raw": WithTransform(func(in []byte) (out map[string]interface{}) { + Expect(yaml.Unmarshal(in, &out)).To(Succeed()) + return out + }, MatchAllKeys(Keys{ + "f:metadata": MatchAllKeys(Keys{ + "f:finalizers": Not(BeZero()), + }), + })), + })), + }), + MatchFields(IgnoreExtras, Fields{ + "Manager": Equal(string(test.Reconciler.Owner)), + "FieldsV1": PointTo(MatchAllFields(Fields{ + "Raw": WithTransform(func(in []byte) (out map[string]interface{}) { + Expect(yaml.Unmarshal(in, &out)).To(Succeed()) + return out + }, MatchAllKeys(Keys{ + "f:status": Not(BeZero()), + })), + })), + }), + ), `controller should manage only "finalizers" and "status"`) + } }) Specify("Patroni Distributed Configuration", func() { - ds := &v1.Service{} + ds := &corev1.Service{} Expect(suite.Client.Get(context.Background(), client.ObjectKey{ Namespace: test.Namespace.Name, Name: "carlos-ha-config", }, ds)).To(Succeed()) @@ -340,6 +435,7 @@ metadata: name: carlos spec: postgresVersion: 13 + image: postgres instances: - name: samba dataVolumeClaimSpec: @@ -348,6 +444,18 @@ spec: resources: requests: storage: 1Gi + backups: + pgbackrest: + image: pgbackrest + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi `) Expect(reconcile(cluster)).To(BeZero()) @@ -380,7 +488,7 @@ spec: }) Specify("Instance ConfigMap", func() { - icm := &v1.ConfigMap{} + icm := &corev1.ConfigMap{} Expect(suite.Client.Get(context.Background(), client.ObjectKey{ Namespace: test.Namespace.Name, Name: instance.Name + "-config", }, icm)).To(Succeed()) @@ -435,7 +543,7 @@ spec: It("resets Instance StatefulSet.Spec.Replicas", func() { ctx := context.Background() - patch := client.MergeFrom(instance.DeepCopyObject()) + patch := client.MergeFrom(instance.DeepCopy()) *instance.Spec.Replicas = 2 Expect(suite.Client.Patch(ctx, &instance, patch)).To(Succeed()) diff --git a/internal/controller/postgrescluster/delete.go b/internal/controller/postgrescluster/delete.go index 0a2b0617ed..63fc007f40 100644 --- a/internal/controller/postgrescluster/delete.go +++ b/internal/controller/postgrescluster/delete.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -27,7 +16,7 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// +kubebuilder:rbac:groups=postgres-operator.crunchydata.com,resources=postgresclusters,verbs=patch +// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={patch} // handleDelete sets a finalizer on cluster and performs the finalization of // cluster when it is being deleted. It returns (nil, nil) when cluster is diff --git a/internal/controller/postgrescluster/delete_test.go b/internal/controller/postgrescluster/delete_test.go deleted file mode 100644 index e119f34a00..0000000000 --- a/internal/controller/postgrescluster/delete_test.go +++ /dev/null @@ -1,465 +0,0 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package postgrescluster - -import ( - "context" - "io" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "go.opentelemetry.io/otel" - "gotest.tools/v3/assert" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/yaml" - - "github.com/crunchydata/postgres-operator/internal/patroni" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -func TestReconcilerHandleDelete(t *testing.T) { - if !strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { - t.Skip("requires a running garbage collection controller") - } - // TODO: Update tests that include envtest package to better handle - // running in parallel - // t.Parallel() - - ctx := context.Background() - env := &envtest.Environment{ - CRDDirectoryPaths: []string{ - filepath.Join("..", "..", "..", "config", "crd", "bases"), - }, - } - - options := client.Options{} - options.Scheme = runtime.NewScheme() - assert.NilError(t, scheme.AddToScheme(options.Scheme)) - assert.NilError(t, v1beta1.AddToScheme(options.Scheme)) - - config, err := env.Start() - assert.NilError(t, err) - t.Cleanup(func() { assert.Check(t, env.Stop()) }) - - cc, err := client.New(config, options) - assert.NilError(t, err) - - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - ns.Labels = labels.Set{"postgres-operator-test": t.Name()} - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, 
cc.Delete(ctx, ns)) }) - - reconciler := Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: new(record.FakeRecorder), - Tracer: otel.Tracer(t.Name()), - } - - reconciler.PodExec, err = newPodExecutor(config) - assert.NilError(t, err) - - mustReconcile := func(t *testing.T, cluster *v1beta1.PostgresCluster) reconcile.Result { - t.Helper() - key := client.ObjectKeyFromObject(cluster) - request := reconcile.Request{NamespacedName: key} - result, err := reconciler.Reconcile(ctx, request) - assert.NilError(t, err, "%+v", err) - return result - } - - for _, test := range []struct { - name string - beforeCreate func(*testing.T, *v1beta1.PostgresCluster) - beforeDelete func(*testing.T, *v1beta1.PostgresCluster) - propagation metav1.DeletionPropagation - - waitForRunningInstances int32 - }{ - // Normal delete of a healthly cluster. - { - name: "Background", propagation: metav1.DeletePropagationBackground, - waitForRunningInstances: 2, - }, - // TODO(cbandy): metav1.DeletePropagationForeground - - // Normal delete of a healthy cluster after a failover. - { - name: "AfterFailover", propagation: metav1.DeletePropagationBackground, - waitForRunningInstances: 2, - - beforeDelete: func(t *testing.T, cluster *v1beta1.PostgresCluster) { - list := v1.PodList{} - selector, err := labels.Parse(strings.Join([]string{ - "postgres-operator.crunchydata.com/cluster=" + cluster.Name, - "postgres-operator.crunchydata.com/instance", - }, ",")) - assert.NilError(t, err) - assert.NilError(t, cc.List(ctx, &list, - client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector})) - - var primary *v1.Pod - var replica *v1.Pod - for i := range list.Items { - if list.Items[i].Labels["postgres-operator.crunchydata.com/role"] == "replica" { - replica = &list.Items[i] - } else { - primary = &list.Items[i] - } - } - - if true && - assert.Check(t, primary != nil, "expected to find a primary in %+v", list.Items) && - assert.Check(t, replica != nil, "expected to find a replica in %+v", list.Items) { - success, err := patroni.Executor( - func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return reconciler.PodExec(replica.Namespace, replica.Name, "database", stdin, stdout, stderr, command...) - }, - ).ChangePrimaryAndWait(ctx, primary.Name, replica.Name) - - assert.NilError(t, err) - assert.Assert(t, success) - } - }, - }, - - // Normal delete of cluster that could never run PostgreSQL. - { - name: "NeverRunning", propagation: metav1.DeletePropagationBackground, - waitForRunningInstances: 0, - - beforeCreate: func(_ *testing.T, cluster *v1beta1.PostgresCluster) { - cluster.Spec.Image = "example.com/does-not-exist" - }, - }, - } { - t.Run(test.name, func(t *testing.T) { - cluster := &v1beta1.PostgresCluster{} - assert.NilError(t, yaml.Unmarshal([]byte(`{ - spec: { - postgresVersion: 13, - instances: [ - { - replicas: 2, - dataVolumeClaimSpec: { - accessModes: [ReadWriteOnce], - resources: { requests: { storage: 1Gi } }, - }, - }, - ], - }, - }`), cluster)) - - cluster.Namespace = ns.Name - cluster.Name = strings.ToLower(test.name) - cluster.Spec.Image = CrunchyPostgresHAImage - - if test.beforeCreate != nil { - test.beforeCreate(t, cluster) - } - - assert.NilError(t, cc.Create(ctx, cluster)) - - t.Cleanup(func() { - // Remove finalizers, if any, so the namespace can terminate. 
- assert.Check(t, client.IgnoreNotFound( - cc.Patch(ctx, cluster, client.RawPatch( - client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) - }) - - // Start cluster. - mustReconcile(t, cluster) - - assert.NilError(t, - cc.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)) - assert.Assert(t, - sets.NewString(cluster.Finalizers...). - Has("postgres-operator.crunchydata.com/finalizer"), - "cluster should immediately have a finalizer") - - // Continue until instances are healthy. - var instances []appsv1.StatefulSet - assert.NilError(t, wait.Poll(time.Second, Scale(time.Minute), func() (bool, error) { - mustReconcile(t, cluster) - - list := appsv1.StatefulSetList{} - selector, err := labels.Parse(strings.Join([]string{ - "postgres-operator.crunchydata.com/cluster=" + cluster.Name, - "postgres-operator.crunchydata.com/instance", - }, ",")) - assert.NilError(t, err) - assert.NilError(t, cc.List(ctx, &list, - client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector})) - - instances = list.Items - - ready := int32(0) - for i := range instances { - ready += instances[i].Status.ReadyReplicas - } - return ready >= test.waitForRunningInstances, nil - }), "expected %v instances to be ready, got:\n%+v", test.waitForRunningInstances, instances) - - if test.beforeDelete != nil { - test.beforeDelete(t, cluster) - } - - switch test.propagation { - case metav1.DeletePropagationBackground: - // Background deletion is the default for custom resources. - // - https://issue.k8s.io/81628 - assert.NilError(t, cc.Delete(ctx, cluster)) - default: - assert.NilError(t, cc.Delete(ctx, cluster, - client.PropagationPolicy(test.propagation))) - } - - // Stop cluster. - result := mustReconcile(t, cluster) - - // If things started running, then they should stop in a certain order. - if test.waitForRunningInstances > 0 { - - // Replicas should stop first, leaving just the one primary. - var instances []v1.Pod - assert.NilError(t, wait.Poll(time.Second, Scale(time.Minute), func() (bool, error) { - if result.Requeue { - result = mustReconcile(t, cluster) - } - - list := v1.PodList{} - selector, err := labels.Parse(strings.Join([]string{ - "postgres-operator.crunchydata.com/cluster=" + cluster.Name, - "postgres-operator.crunchydata.com/instance", - }, ",")) - assert.NilError(t, err) - assert.NilError(t, cc.List(ctx, &list, - client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector})) - - instances = list.Items - - // Patroni doesn't use "primary" to identify the primary. - return len(instances) == 1 && - instances[0].Labels["postgres-operator.crunchydata.com/role"] == "master", nil - }), "expected one instance, got:\n%+v", instances) - - // Patroni DCS objects should not be deleted yet. - { - list := v1.EndpointsList{} - selector, err := labels.Parse(strings.Join([]string{ - "postgres-operator.crunchydata.com/cluster=" + cluster.Name, - "postgres-operator.crunchydata.com/patroni", - }, ",")) - assert.NilError(t, err) - assert.NilError(t, cc.List(ctx, &list, - client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector})) - - assert.Assert(t, len(list.Items) >= 2, // config + leader - "expected Patroni DCS objects to remain, there are %v", - len(list.Items)) - - // Endpoints are deleted differently than other resources, and - // Patroni might have recreated them to stay alive. Check that - // they are all from before the cluster delete operation. 
- // - https://issue.k8s.io/99407 - assert.NilError(t, - cc.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)) - - for _, endpoints := range list.Items { - assert.Assert(t, - endpoints.CreationTimestamp.Time.Before(cluster.DeletionTimestamp.Time), - `expected %q to be after %+v`, cluster.DeletionTimestamp, endpoints) - } - } - } - - // Continue until cluster is gone. - assert.NilError(t, wait.Poll(time.Second, Scale(time.Minute), func() (bool, error) { - mustReconcile(t, cluster) - - err := cc.Get(ctx, client.ObjectKeyFromObject(cluster), cluster) - return apierrors.IsNotFound(err), client.IgnoreNotFound(err) - }), "expected cluster to be deleted, got:\n%+v", *cluster) - - var endpoints []v1.Endpoints - assert.NilError(t, wait.Poll(time.Second, Scale(time.Minute/3), func() (bool, error) { - list := v1.EndpointsList{} - selector, err := labels.Parse(strings.Join([]string{ - "postgres-operator.crunchydata.com/cluster=" + cluster.Name, - "postgres-operator.crunchydata.com/patroni", - }, ",")) - assert.NilError(t, err) - assert.NilError(t, cc.List(ctx, &list, - client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector})) - - endpoints = list.Items - - return len(endpoints) == 0, nil - }), "Patroni DCS objects should be gone, got:\n%+v", endpoints) - }) - } -} - -func TestReconcilerHandleDeleteNamespace(t *testing.T) { - if !strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { - t.Skip("requires a running garbage collection controller") - } - - // TODO: Update tests that include envtest package to better handle - // running in parallel - // t.Parallel() - - ctx := context.Background() - env := &envtest.Environment{ - CRDDirectoryPaths: []string{ - filepath.Join("..", "..", "..", "config", "crd", "bases"), - }, - } - - options := client.Options{} - options.Scheme = runtime.NewScheme() - assert.NilError(t, scheme.AddToScheme(options.Scheme)) - assert.NilError(t, v1beta1.AddToScheme(options.Scheme)) - - config, err := env.Start() - assert.NilError(t, err) - t.Cleanup(func() { assert.Check(t, env.Stop()) }) - - cc, err := client.New(config, options) - assert.NilError(t, err) - - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - ns.Labels = labels.Set{"postgres-operator-test": t.Name()} - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, client.IgnoreNotFound(cc.Delete(ctx, ns))) }) - - var mm struct { - manager.Manager - Context context.Context - Error chan error - Stop context.CancelFunc - } - - mm.Context, mm.Stop = context.WithCancel(context.Background()) - mm.Error = make(chan error, 1) - mm.Manager, err = manager.New(config, manager.Options{ - Namespace: ns.Name, - Scheme: options.Scheme, - - HealthProbeBindAddress: "0", // disable - MetricsBindAddress: "0", // disable - }) - assert.NilError(t, err) - - reconciler := Reconciler{ - Client: mm.GetClient(), - Owner: client.FieldOwner(t.Name()), - Recorder: new(record.FakeRecorder), - Tracer: otel.Tracer(t.Name()), - } - assert.NilError(t, reconciler.SetupWithManager(mm.Manager)) - - go func() { mm.Error <- mm.Start(mm.Context) }() - t.Cleanup(func() { mm.Stop(); assert.Check(t, <-mm.Error) }) - - cluster := &v1beta1.PostgresCluster{} - assert.NilError(t, yaml.Unmarshal([]byte(`{ - spec: { - postgresVersion: 13, - instances: [ - { - replicas: 2, - dataVolumeClaimSpec: { - accessModes: [ReadWriteOnce], - resources: { requests: { storage: 1Gi } }, - }, - }, - ], - }, - }`), cluster)) - - cluster.Namespace = ns.Name - cluster.Name = 
strings.ToLower("DeleteNamespace") - cluster.Spec.Image = CrunchyPostgresHAImage - - assert.NilError(t, cc.Create(ctx, cluster)) - - t.Cleanup(func() { - // Remove finalizers, if any, so the namespace can terminate. - assert.Check(t, client.IgnoreNotFound( - cc.Patch(ctx, cluster, client.RawPatch( - client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) - }) - - var instances []appsv1.StatefulSet - assert.NilError(t, wait.Poll(time.Second, Scale(time.Minute), func() (bool, error) { - list := appsv1.StatefulSetList{} - selector, err := labels.Parse(strings.Join([]string{ - "postgres-operator.crunchydata.com/cluster=" + cluster.Name, - "postgres-operator.crunchydata.com/instance", - }, ",")) - assert.NilError(t, err) - assert.NilError(t, cc.List(ctx, &list, - client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector})) - - instances = list.Items - - ready := 0 - for i := range instances { - ready += int(instances[i].Status.ReadyReplicas) - } - return ready >= 2, nil - }), "expected instances to be ready, got:\n%+v", instances) - - // Delete the namespace. - assert.NilError(t, cc.Delete(ctx, ns)) - - assert.NilError(t, wait.PollImmediate(time.Second, Scale(time.Minute), func() (bool, error) { - err := cc.Get(ctx, client.ObjectKeyFromObject(cluster), cluster) - return apierrors.IsNotFound(err), client.IgnoreNotFound(err) - }), "expected cluster to be deleted, got:\n%+v", *cluster) - - assert.NilError(t, wait.PollImmediate(time.Second, Scale(time.Minute/4), func() (bool, error) { - err := cc.Get(ctx, client.ObjectKeyFromObject(ns), &v1.Namespace{}) - return apierrors.IsNotFound(err), client.IgnoreNotFound(err) - }), "expected namespace to be deleted") -} diff --git a/internal/controller/postgrescluster/helpers_test.go b/internal/controller/postgrescluster/helpers_test.go index 64bfc2c5b4..0536b466d4 100644 --- a/internal/controller/postgrescluster/helpers_test.go +++ b/internal/controller/postgrescluster/helpers_test.go @@ -1,51 +1,38 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" "os" - "path/filepath" "strconv" - "strings" "testing" "time" - "gotest.tools/v3/assert/cmp" - v1 "k8s.io/api/core/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) var ( //TODO(tjmoore4): With the new RELATED_IMAGES defaulting behavior, tests could be refactored // to reference those environment variables instead of hard coded image values - CrunchyPostgresHAImage = "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.3-4.7.0" - CrunchyPGBackRestImage = "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:centos8-13.3-4.7.0" - CrunchyPGBouncerImage = "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:centos8-13.3-4.7.0" + CrunchyPostgresHAImage = "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-13.6-1" + CrunchyPGBackRestImage = "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.38-0" + CrunchyPGBouncerImage = "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.16-2" ) // Scale extends d according to PGO_TEST_TIMEOUT_SCALE. @@ -66,26 +53,55 @@ func init() { } } -// marshalMatches converts actual to YAML and compares that to expected. -func marshalMatches(actual interface{}, expected string) cmp.Comparison { - b, err := yaml.Marshal(actual) - if err != nil { - return func() cmp.Result { return cmp.ResultFromError(err) } - } - return cmp.DeepEqual(string(b), strings.Trim(expected, "\t\n")+"\n") +// setupKubernetes starts or connects to a Kubernetes API and returns a client +// that uses it. See [require.Kubernetes] for more details. +func setupKubernetes(t testing.TB) (*rest.Config, client.Client) { + t.Helper() + + // Start and/or connect to a Kubernetes API, or Skip when that's not configured. + cfg, cc := require.Kubernetes2(t) + + // Log the status of any test namespaces after this test fails. + t.Cleanup(func() { + if t.Failed() { + var namespaces corev1.NamespaceList + _ = cc.List(context.Background(), &namespaces, client.HasLabels{"postgres-operator-test"}) + + type shaped map[string]corev1.NamespaceStatus + result := make([]shaped, len(namespaces.Items)) + + for i, ns := range namespaces.Items { + result[i] = shaped{ns.Labels["postgres-operator-test"]: ns.Status} + } + + formatted, _ := yaml.Marshal(result) + t.Logf("Test Namespaces:\n%s", formatted) + } + }) + + return cfg, cc } -func testVolumeClaimSpec() v1.PersistentVolumeClaimSpec { +// setupNamespace creates a random namespace that will be deleted by t.Cleanup. +// +// Deprecated: Use [require.Namespace] instead. 
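// Editor's aside, not part of the patch: a sketch of how the helpers above are
// typically combined, mirroring TestManageControllerRefs earlier in this diff.
// The test name is illustrative and assumes the file sits alongside these
// helpers in package postgrescluster.
func TestHelperUsageSketch(t *testing.T) {
	_, cc := setupKubernetes(t)    // connect to a Kubernetes API, or skip when one is not configured
	require.ParallelCapacity(t, 1) // same capacity declaration the existing tests use

	ns := setupNamespace(t, cc) // random namespace, cleaned up by t.Cleanup
	cluster := testCluster()    // base PostgresCluster spec shared by these tests
	cluster.Namespace = ns.Name

	if err := cc.Create(context.Background(), cluster); err != nil {
		t.Fatal(err)
	}
}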
+func setupNamespace(t testing.TB, cc client.Client) *corev1.Namespace { + t.Helper() + return require.Namespace(t, cc) +} + +func testVolumeClaimSpec() corev1.PersistentVolumeClaimSpec { // Defines a volume claim spec that can be used to create instances - return v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, - Resources: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceStorage: resource.MustParse("1Gi"), + return corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), }, }, } } + func testCluster() *v1beta1.PostgresCluster { // Defines a base cluster spec that can be used by tests to generate a // cluster with an expected number of instances @@ -96,12 +112,12 @@ func testCluster() *v1beta1.PostgresCluster { Spec: v1beta1.PostgresClusterSpec{ PostgresVersion: 13, Image: CrunchyPostgresHAImage, - ImagePullSecrets: []v1.LocalObjectReference{{ + ImagePullSecrets: []corev1.LocalObjectReference{{ Name: "myImagePullSecret"}, }, InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ Name: "instance1", - Replicas: Int32(1), + Replicas: initialize.Int32(1), DataVolumeClaimSpec: testVolumeClaimSpec(), }}, Backups: v1beta1.Backups{ @@ -125,44 +141,75 @@ func testCluster() *v1beta1.PostgresCluster { return cluster.DeepCopy() } -// setupTestEnv configures and starts an EnvTest instance of etcd and the Kubernetes API server -// for test usage, as well as creates a new client instance. -func setupTestEnv(t *testing.T, - _ string) (*envtest.Environment, client.Client, *rest.Config) { - - testEnv := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, - } - cfg, err := testEnv.Start() - if err != nil { - t.Fatal(err) +func testBackupJob(cluster *v1beta1.PostgresCluster) *batchv1.Job { + job := batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + APIVersion: batchv1.SchemeGroupVersion.String(), + Kind: "Job", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-job-1", + Namespace: cluster.Namespace, + Labels: map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelPGBackRestBackup: "", + naming.LabelPGBackRestRepo: "repo1", + }, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "test", Image: "test"}}, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, } - t.Log("Test environment started") - pgoScheme, err := runtime.CreatePostgresOperatorScheme() - if err != nil { - t.Fatal(err) - } - client, err := client.New(cfg, client.Options{Scheme: pgoScheme}) - if err != nil { - t.Fatal(err) + return job.DeepCopy() +} + +func testRestoreJob(cluster *v1beta1.PostgresCluster) *batchv1.Job { + job := batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + APIVersion: batchv1.SchemeGroupVersion.String(), + Kind: "Job", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "restore-job-1", + Namespace: cluster.Namespace, + Labels: naming.PGBackRestRestoreJobLabels(cluster.Name), + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "test", Image: "test"}}, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, } - return testEnv, client, cfg + return job.DeepCopy() } // setupManager creates the runtime manager used during 
controller testing func setupManager(t *testing.T, cfg *rest.Config, - contollerSetup func(mgr manager.Manager)) (context.Context, context.CancelFunc) { + controllerSetup func(mgr manager.Manager)) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + + // Disable health endpoints + options := runtime.Options{} + options.HealthProbeBindAddress = "0" + options.Metrics.BindAddress = "0" - mgr, err := runtime.CreateRuntimeManager("", cfg, true) + mgr, err := runtime.NewManager(cfg, options) if err != nil { t.Fatal(err) } - contollerSetup(mgr) + controllerSetup(mgr) - ctx, cancel := context.WithCancel(context.Background()) go func() { if err := mgr.Start(ctx); err != nil { t.Error(err) @@ -173,16 +220,7 @@ func setupManager(t *testing.T, cfg *rest.Config, return ctx, cancel } -// teardownTestEnv stops the test environment when the tests -// have completed -func teardownTestEnv(t *testing.T, testEnv *envtest.Environment) { - if err := testEnv.Stop(); err != nil { - t.Error(err) - } - t.Log("Test environment stopped") -} - -// teardownManager stops the runtimem manager when the tests +// teardownManager stops the runtime manager when the tests // have completed func teardownManager(cancel context.CancelFunc, t *testing.T) { cancel() diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index 908ce710d9..66321cc738 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -24,20 +13,23 @@ import ( "time" "github.com/pkg/errors" - attributes "go.opentelemetry.io/otel/label" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" + policyv1 "k8s.io/api/policy/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" @@ -209,7 +201,7 @@ type observedInstances struct { byName map[string]*Instance bySet map[string][]*Instance forCluster []*Instance - setNames sets.String + setNames sets.Set[string] } // newObservedInstances builds an observedInstances from Kubernetes API objects. @@ -221,7 +213,7 @@ func newObservedInstances( observed := observedInstances{ byName: make(map[string]*Instance), bySet: make(map[string][]*Instance), - setNames: make(sets.String), + setNames: make(sets.Set[string]), } sets := make(map[string]*v1beta1.PostgresInstanceSetSpec) @@ -266,17 +258,44 @@ func newObservedInstances( return &observed } -// +kubebuilder:rbac:groups="",resources=pods,verbs=list -// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=list +// writablePod looks at observedInstances and finds an instance that matches +// a few conditions. The instance should be non-terminating, running, and +// writable i.e. the instance with the primary. If such an instance exists, it +// is returned along with the instance pod. +func (observed *observedInstances) writablePod(container string) (*corev1.Pod, *Instance) { + if observed == nil { + return nil, nil + } + + for _, instance := range observed.forCluster { + if terminating, known := instance.IsTerminating(); terminating || !known { + continue + } + if writable, known := instance.IsWritable(); !writable || !known { + continue + } + running, known := instance.IsRunning(container) + if running && known && len(instance.Pods) > 0 { + return instance.Pods[0], instance + } + } + + return nil, nil +} + +// +kubebuilder:rbac:groups="",resources="pods",verbs={list} +// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={list} // observeInstances populates cluster.Status.InstanceSets with observations and // builds an observedInstances by reading from the Kubernetes API. func (r *Reconciler) observeInstances( ctx context.Context, cluster *v1beta1.PostgresCluster, ) (*observedInstances, error) { - pods := &v1.PodList{} + pods := &corev1.PodList{} runners := &appsv1.StatefulSetList{} + autogrow := feature.Enabled(ctx, feature.AutoGrowVolumes) + selector, err := naming.AsSelector(naming.ClusterInstances(cluster.Name)) if err == nil { err = errors.WithStack( @@ -295,57 +314,126 @@ func (r *Reconciler) observeInstances( observed := newObservedInstances(cluster, runners.Items, pods.Items) + // Save desired volume size values in case the status is removed. 
+ // This may happen in cases where the Pod is restarted, the cluster + // is shutdown, etc. Only save values for instances defined in the spec. + previousDesiredRequests := make(map[string]string) + if autogrow { + for _, statusIS := range cluster.Status.InstanceSets { + if statusIS.DesiredPGDataVolume != nil { + for k, v := range statusIS.DesiredPGDataVolume { + previousDesiredRequests[k] = v + } + } + } + } + // Fill out status sorted by set name. cluster.Status.InstanceSets = cluster.Status.InstanceSets[:0] - for _, name := range observed.setNames.List() { + for _, name := range sets.List(observed.setNames) { status := v1beta1.PostgresInstanceSetStatus{Name: name} + status.DesiredPGDataVolume = make(map[string]string) + for _, instance := range observed.bySet[name] { + status.Replicas += int32(len(instance.Pods)) //nolint:gosec + if ready, known := instance.IsReady(); known && ready { status.ReadyReplicas++ } - if terminating, known := instance.IsTerminating(); known && !terminating { - status.Replicas++ - - if matches, known := instance.PodMatchesPodTemplate(); known && matches { - status.UpdatedReplicas++ + if matches, known := instance.PodMatchesPodTemplate(); known && matches { + status.UpdatedReplicas++ + } + if autogrow { + // Store desired pgData volume size for each instance Pod. + // The 'suggested-pgdata-pvc-size' annotation value is stored in the PostgresCluster + // status so that 1) it is available to the function 'reconcilePostgresDataVolume' + // and 2) so that the value persists after Pod restart and cluster shutdown events. + for _, pod := range instance.Pods { + // don't set an empty status + if pod.Annotations["suggested-pgdata-pvc-size"] != "" { + status.DesiredPGDataVolume[instance.Name] = pod.Annotations["suggested-pgdata-pvc-size"] + } } } } + // If autogrow is enabled, get the desired volume size for each instance. + if autogrow { + for _, instance := range observed.bySet[name] { + status.DesiredPGDataVolume[instance.Name] = r.storeDesiredRequest(ctx, cluster, + name, status.DesiredPGDataVolume[instance.Name], previousDesiredRequests[instance.Name]) + } + } + cluster.Status.InstanceSets = append(cluster.Status.InstanceSets, status) } - // Determine if a restore is in progress. If so, simply return to ensure the startup instance - // remains properly set throughout the duration of the restore. - restoreCondition := meta.FindStatusCondition(cluster.Status.Conditions, - ConditionPGBackRestRestoreProgressing) - restoringInPlace := restoreCondition != nil && - (restoreCondition.Status == metav1.ConditionTrue) - if restoringInPlace { - return observed, err + return observed, err +} + +// storeDesiredRequest saves the appropriate request value to the PostgresCluster +// status. If the value has grown, create an Event. +func (r *Reconciler) storeDesiredRequest( + ctx context.Context, cluster *v1beta1.PostgresCluster, + instanceSetName, desiredRequest, desiredRequestBackup string, +) string { + var current resource.Quantity + var previous resource.Quantity + var err error + log := logging.FromContext(ctx) + + // Parse the desired request from the cluster's status. + if desiredRequest != "" { + current, err = resource.ParseQuantity(desiredRequest) + if err != nil { + log.Error(err, "Unable to parse pgData volume request from status ("+ + desiredRequest+") for "+cluster.Name+"/"+instanceSetName) + // If there was an error parsing the value, treat as unset (equivalent to zero). 
+ desiredRequest = "" + current, _ = resource.ParseQuantity("") + + } } - // Go through the observed instances and check if a primary has been determined. - // If the cluster is being shutdown and this instance is the primary, store - // the instance name as the startup instance. If the primary can be determined - // from the instance and the cluster is not being shutdown, clear any stored - // startup instance values. - for _, instance := range observed.forCluster { - if primary, known := instance.IsPrimary(); primary && known { - if cluster.Spec.Shutdown != nil && *cluster.Spec.Shutdown { - cluster.Status.StartupInstance = instance.Name - } else { - cluster.Status.StartupInstance = "" - cluster.Status.StartupInstanceSet = "" - } + // Parse the desired request from the status backup. + if desiredRequestBackup != "" { + previous, err = resource.ParseQuantity(desiredRequestBackup) + if err != nil { + log.Error(err, "Unable to parse pgData volume request from status backup ("+ + desiredRequestBackup+") for "+cluster.Name+"/"+instanceSetName) + // If there was an error parsing the value, treat as unset (equivalent to zero). + desiredRequestBackup = "" + previous, _ = resource.ParseQuantity("") + } } - return observed, err + // Determine if the limit is set for this instance set. + var limitSet bool + for _, specInstance := range cluster.Spec.InstanceSets { + if specInstance.Name == instanceSetName { + limitSet = !specInstance.DataVolumeClaimSpec.Resources.Limits.Storage().IsZero() + } + } + + if limitSet && current.Value() > previous.Value() { + r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "VolumeAutoGrow", + "pgData volume expansion to %v requested for %s/%s.", + current.String(), cluster.Name, instanceSetName) + } + + // If the desired size was not observed, update with previously stored value. + // This can happen in scenarios where the annotation on the Pod is missing + // such as when the cluster is shutdown or a Pod is in the middle of a restart. + if desiredRequest == "" { + desiredRequest = desiredRequestBackup + } + + return desiredRequest } -// +kubebuilder:rbac:groups="",resources=pods,verbs=list -// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=patch +// +kubebuilder:rbac:groups="",resources="pods",verbs={list} +// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={patch} // deleteInstances gracefully stops instances of cluster to avoid failovers and // unclean shutdowns of PostgreSQL. It returns (nil, nil) when finished. @@ -353,7 +441,7 @@ func (r *Reconciler) deleteInstances( ctx context.Context, cluster *v1beta1.PostgresCluster, ) (*reconcile.Result, error) { // Find all instance pods to determine which to shutdown and in what order. - pods := &v1.PodList{} + pods := &corev1.PodList{} instances, err := naming.AsSelector(naming.ClusterInstances(cluster.Name)) if err == nil { err = errors.WithStack( @@ -377,7 +465,7 @@ func (r *Reconciler) deleteInstances( result := reconcile.Result{} // stop schedules pod for deletion by scaling its controller to zero. - stop := func(pod *v1.Pod) error { + stop := func(pod *corev1.Pod) error { instance := &unstructured.Unstructured{} instance.SetNamespace(cluster.Namespace) @@ -404,7 +492,7 @@ func (r *Reconciler) deleteInstances( // mistake that something else is deleting objects. Use RequeueAfter to // avoid being rate-limited due to a deluge of delete events. 
if err != nil { - result.RequeueAfter = 10 * time.Second + result = runtime.RequeueWithoutBackoff(10 * time.Second) } return client.IgnoreNotFound(err) } @@ -437,10 +525,10 @@ func (r *Reconciler) deleteInstances( return &result, err } -// +kubebuilder:rbac:groups="",resources=configmaps,verbs=delete;list -// +kubebuilder:rbac:groups="",resources=secrets,verbs=delete;list -// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=delete;list -// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=delete;list +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={delete,list} +// +kubebuilder:rbac:groups="",resources="secrets",verbs={delete,list} +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={delete,list} +// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={delete,list} // deleteInstance will delete all resources related to a single instance func (r *Reconciler) deleteInstance( @@ -449,20 +537,20 @@ func (r *Reconciler) deleteInstance( instanceName string, ) error { gvks := []schema.GroupVersionKind{{ - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, Kind: "ConfigMapList", }, { - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, Kind: "SecretList", }, { Group: appsv1.SchemeGroupVersion.Group, Version: appsv1.SchemeGroupVersion.Version, Kind: "StatefulSetList", }, { - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, Kind: "PersistentVolumeClaimList", }} @@ -495,16 +583,36 @@ func (r *Reconciler) deleteInstance( func (r *Reconciler) reconcileInstanceSets( ctx context.Context, cluster *v1beta1.PostgresCluster, - clusterConfigMap *v1.ConfigMap, - clusterReplicationSecret *v1.Secret, + clusterConfigMap *corev1.ConfigMap, + clusterReplicationSecret *corev1.Secret, rootCA *pki.RootCertificateAuthority, - clusterPodService *v1.Service, - instanceServiceAccount *v1.ServiceAccount, + clusterPodService *corev1.Service, + instanceServiceAccount *corev1.ServiceAccount, instances *observedInstances, - patroniLeaderService *v1.Service, - primaryCertificate *v1.SecretProjection, - clusterVolumes []v1.PersistentVolumeClaim, + patroniLeaderService *corev1.Service, + primaryCertificate *corev1.SecretProjection, + clusterVolumes []corev1.PersistentVolumeClaim, + exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap, + backupsSpecFound bool, ) error { + + // Go through the observed instances and check if a primary has been determined. + // If the cluster is being shutdown and this instance is the primary, store + // the instance name as the startup instance. If the primary can be determined + // from the instance and the cluster is not being shutdown, clear any stored + // startup instance values. 
+ for _, instance := range instances.forCluster { + if primary, known := instance.IsPrimary(); primary && known { + if cluster.Spec.Shutdown != nil && *cluster.Spec.Shutdown { + cluster.Status.StartupInstance = instance.Name + cluster.Status.StartupInstanceSet = instance.Spec.Name + } else { + cluster.Status.StartupInstance = "" + cluster.Status.StartupInstanceSet = "" + } + } + } + // get the number of instance pods from the observedInstance information var numInstancePods int for i := range instances.forCluster { @@ -514,14 +622,21 @@ func (r *Reconciler) reconcileInstanceSets( // Range over instance sets to scale up and ensure that each set has // at least the number of replicas defined in the spec. The set can // have more replicas than defined - for i, set := range cluster.Spec.InstanceSets { + for i := range cluster.Spec.InstanceSets { + set := &cluster.Spec.InstanceSets[i] _, err := r.scaleUpInstances( - ctx, cluster, instances, &cluster.Spec.InstanceSets[i], + ctx, cluster, instances, set, clusterConfigMap, clusterReplicationSecret, rootCA, clusterPodService, instanceServiceAccount, patroniLeaderService, primaryCertificate, - findAvailableInstanceNames(set, instances, clusterVolumes), - numInstancePods, clusterVolumes) + findAvailableInstanceNames(*set, instances, clusterVolumes), + numInstancePods, clusterVolumes, exporterQueriesConfig, exporterWebConfig, + backupsSpecFound, + ) + + if err == nil { + err = r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, set) + } if err != nil { return err } @@ -535,6 +650,12 @@ func (r *Reconciler) reconcileInstanceSets( return err } + // Cleanup Instance Set resources that are no longer needed + err = r.cleanupPodDisruptionBudgets(ctx, cluster) + if err != nil { + return err + } + // Rollout changes to instances by calling rolloutInstance. err = r.rolloutInstances(ctx, cluster, instances, func(ctx context.Context, instance *Instance) error { @@ -544,6 +665,40 @@ func (r *Reconciler) reconcileInstanceSets( return err } +// +kubebuilder:rbac:groups="policy",resources="poddisruptionbudgets",verbs={list} + +// cleanupPodDisruptionBudgets removes pdbs that do not have an +// associated Instance Set +func (r *Reconciler) cleanupPodDisruptionBudgets( + ctx context.Context, + cluster *v1beta1.PostgresCluster, +) error { + selector, err := naming.AsSelector(naming.ClusterInstanceSets(cluster.Name)) + + pdbList := &policyv1.PodDisruptionBudgetList{} + if err == nil { + err = r.Client.List(ctx, pdbList, + client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{ + Selector: selector, + }) + } + + if err == nil { + setNames := sets.Set[string]{} + for _, set := range cluster.Spec.InstanceSets { + setNames.Insert(set.Name) + } + for i := range pdbList.Items { + pdb := pdbList.Items[i] + if err == nil && !setNames.Has(pdb.Labels[naming.LabelInstanceSet]) { + err = client.IgnoreNotFound(r.deleteControlled(ctx, cluster, &pdb)) + } + } + } + + return client.IgnoreNotFound(err) +} + // TODO (andrewlecuyer): If relevant instance volume (PVC) information is captured for each // Instance contained within observedInstances, this function might no longer be necessary. // Instead, available names could be derived by looking at observed Instances that have data @@ -554,12 +709,12 @@ func (r *Reconciler) reconcileInstanceSets( // for the instance set specified that are not currently associated with an instance, and then // returning the instance names associated with those PVC's. 
func findAvailableInstanceNames(set v1beta1.PostgresInstanceSetSpec, - observedInstances *observedInstances, clusterVolumes []v1.PersistentVolumeClaim) []string { + observedInstances *observedInstances, clusterVolumes []corev1.PersistentVolumeClaim) []string { availableInstanceNames := []string{} // first identify any PGDATA volumes for the instance set specified - setVolumes := []v1.PersistentVolumeClaim{} + setVolumes := []corev1.PersistentVolumeClaim{} for _, pvc := range clusterVolumes { // ignore PGDATA PVCs that are terminating if pvc.GetDeletionTimestamp() != nil { @@ -577,7 +732,7 @@ func findAvailableInstanceNames(set v1beta1.PostgresInstanceSetSpec, // any available PGDATA volumes for the instance set that have no corresponding WAL // volumes (which means new PVCs will simply be reconciled instead). if set.WALVolumeClaimSpec != nil { - setVolumesWithWAL := []v1.PersistentVolumeClaim{} + setVolumesWithWAL := []corev1.PersistentVolumeClaim{} for _, setVol := range setVolumes { setVolInstance := setVol.GetLabels()[naming.LabelInstance] for _, pvc := range clusterVolumes { @@ -610,7 +765,7 @@ func findAvailableInstanceNames(set v1beta1.PostgresInstanceSetSpec, return availableInstanceNames } -// +kubebuilder:rbac:groups="",resources=pods,verbs=delete +// +kubebuilder:rbac:groups="",resources="pods",verbs={delete} // rolloutInstance redeploys the Pod of instance by deleting it. Its StatefulSet // will recreate it according to its current PodTemplate. When instance is the @@ -630,7 +785,7 @@ func (r *Reconciler) rolloutInstance( pod := instance.Pods[0] exec := func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) } primary, known := instance.IsPrimary() @@ -706,10 +861,10 @@ func (r *Reconciler) rolloutInstance( // Communicate the lack or slowness of CHECKPOINT and shutdown anyway. if err != nil { - r.Recorder.Eventf(cluster, v1.EventTypeWarning, "NoCheckpoint", + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "NoCheckpoint", "Unable to checkpoint primary before shutdown: %v", err) } else if duration > threshold { - r.Recorder.Eventf(cluster, v1.EventTypeWarning, "SlowCheckpoint", + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "SlowCheckpoint", "Shutting down primary despite checkpoint taking over %v", duration) } } @@ -782,10 +937,10 @@ func (r *Reconciler) rolloutInstances( } span.SetAttributes( - attributes.Int("instances", len(instances.forCluster)), - attributes.Int("specified", numSpecified), - attributes.Int("available", numAvailable), - attributes.Int("considering", len(consider)), + attribute.Int("instances", len(instances.forCluster)), + attribute.Int("specified", numSpecified), + attribute.Int("available", numAvailable), + attribute.Int("considering", len(consider)), ) // Redeploy instances up to the allowed maximum while "rolling over" any @@ -809,10 +964,11 @@ func (r *Reconciler) rolloutInstances( // scaleDownInstances removes extra instances from a cluster until it matches // the spec. 
This function can delete the primary instance and force the // cluster to failover under two conditions: -// - If the instance set that contains the primary instance is removed from -// the spec -// - If the instance set that contains the primary instance is updated to -// have 0 replicas +// - If the instance set that contains the primary instance is removed from +// the spec +// - If the instance set that contains the primary instance is updated to +// have 0 replicas +// // If either of these conditions are met then the primary instance will be // marked for deletion and deleted after all other instances func (r *Reconciler) scaleDownInstances( @@ -828,7 +984,7 @@ func (r *Reconciler) scaleDownInstances( } // grab all pods for the cluster using the observed instances - pods := []v1.Pod{} + pods := []corev1.Pod{} for instanceIndex := range observedInstances.forCluster { for podIndex := range observedInstances.forCluster[instanceIndex].Pods { pods = append(pods, *observedInstances.forCluster[instanceIndex].Pods[podIndex]) @@ -858,10 +1014,10 @@ func (r *Reconciler) scaleDownInstances( // podsToKeep takes a list of pods and a map containing // the number of replicas we want for each instance set // then returns a list of the pods that we want to keep -func podsToKeep(instances []v1.Pod, want map[string]int) []v1.Pod { +func podsToKeep(instances []corev1.Pod, want map[string]int) []corev1.Pod { - f := func(instances []v1.Pod, want int) []v1.Pod { - keep := []v1.Pod{} + f := func(instances []corev1.Pod, want int) []corev1.Pod { + keep := []corev1.Pod{} if want > 0 { for _, instance := range instances { @@ -880,9 +1036,9 @@ func podsToKeep(instances []v1.Pod, want map[string]int) []v1.Pod { return keep } - keepPodList := []v1.Pod{} + keepPodList := []corev1.Pod{} for name, num := range want { - list := []v1.Pod{} + list := []corev1.Pod{} for _, instance := range instances { if instance.Labels[naming.LabelInstanceSet] == name { list = append(list, instance) @@ -895,7 +1051,7 @@ func podsToKeep(instances []v1.Pod, want map[string]int) []v1.Pod { } -// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=list +// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={list} // scaleUpInstances updates the cluster until the number of instances matches // the cluster spec @@ -904,16 +1060,18 @@ func (r *Reconciler) scaleUpInstances( cluster *v1beta1.PostgresCluster, observed *observedInstances, set *v1beta1.PostgresInstanceSetSpec, - clusterConfigMap *v1.ConfigMap, - clusterReplicationSecret *v1.Secret, + clusterConfigMap *corev1.ConfigMap, + clusterReplicationSecret *corev1.Secret, rootCA *pki.RootCertificateAuthority, - clusterPodService *v1.Service, - instanceServiceAccount *v1.ServiceAccount, - patroniLeaderService *v1.Service, - primaryCertificate *v1.SecretProjection, + clusterPodService *corev1.Service, + instanceServiceAccount *corev1.ServiceAccount, + patroniLeaderService *corev1.Service, + primaryCertificate *corev1.SecretProjection, availableInstanceNames []string, numInstancePods int, clusterVolumes []corev1.PersistentVolumeClaim, + exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap, + backupsSpecFound bool, ) ([]*appsv1.StatefulSet, error) { log := logging.FromContext(ctx) @@ -957,7 +1115,8 @@ func (r *Reconciler) scaleUpInstances( clusterConfigMap, clusterReplicationSecret, rootCA, clusterPodService, instanceServiceAccount, patroniLeaderService, primaryCertificate, instances[i], - numInstancePods, clusterVolumes, + numInstancePods, clusterVolumes, 
exporterQueriesConfig, exporterWebConfig, + backupsSpecFound, ) } if err == nil { @@ -967,7 +1126,7 @@ func (r *Reconciler) scaleUpInstances( return instances, err } -// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=create;patch +// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={create,patch} // reconcileInstance writes instance according to spec of cluster. // See Reconciler.reconcileInstanceSet. @@ -976,16 +1135,18 @@ func (r *Reconciler) reconcileInstance( cluster *v1beta1.PostgresCluster, observed *Instance, spec *v1beta1.PostgresInstanceSetSpec, - clusterConfigMap *v1.ConfigMap, - clusterReplicationSecret *v1.Secret, + clusterConfigMap *corev1.ConfigMap, + clusterReplicationSecret *corev1.Secret, rootCA *pki.RootCertificateAuthority, - clusterPodService *v1.Service, - instanceServiceAccount *v1.ServiceAccount, - patroniLeaderService *v1.Service, - primaryCertificate *v1.SecretProjection, + clusterPodService *corev1.Service, + instanceServiceAccount *corev1.ServiceAccount, + patroniLeaderService *corev1.Service, + primaryCertificate *corev1.SecretProjection, instance *appsv1.StatefulSet, numInstancePods int, clusterVolumes []corev1.PersistentVolumeClaim, + exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap, + backupsSpecFound bool, ) error { log := logging.FromContext(ctx).WithValues("instance", instance.Name) ctx = logging.NewContext(ctx, log) @@ -1002,10 +1163,11 @@ func (r *Reconciler) reconcileInstance( } var ( - instanceConfigMap *v1.ConfigMap - instanceCertificates *v1.Secret + instanceConfigMap *corev1.ConfigMap + instanceCertificates *corev1.Secret postgresDataVolume *corev1.PersistentVolumeClaim postgresWALVolume *corev1.PersistentVolumeClaim + tablespaceVolumes []*corev1.PersistentVolumeClaim ) if err == nil { @@ -1016,45 +1178,42 @@ func (r *Reconciler) reconcileInstance( ctx, cluster, spec, instance, rootCA) } if err == nil { - postgresDataVolume, err = r.reconcilePostgresDataVolume(ctx, cluster, spec, instance, clusterVolumes) + postgresDataVolume, err = r.reconcilePostgresDataVolume(ctx, cluster, spec, instance, clusterVolumes, nil) } if err == nil { postgresWALVolume, err = r.reconcilePostgresWALVolume(ctx, cluster, spec, instance, observed, clusterVolumes) } + if err == nil { + tablespaceVolumes, err = r.reconcileTablespaceVolumes(ctx, cluster, spec, instance, clusterVolumes) + } if err == nil { postgres.InstancePod( - ctx, cluster, spec, postgresDataVolume, postgresWALVolume, + ctx, cluster, spec, + primaryCertificate, replicationCertSecretProjection(clusterReplicationSecret), + postgresDataVolume, postgresWALVolume, tablespaceVolumes, &instance.Spec.Template.Spec) + if backupsSpecFound { + addPGBackRestToInstancePodSpec( + ctx, cluster, instanceCertificates, &instance.Spec.Template.Spec) + } + err = patroni.InstancePod( ctx, cluster, clusterConfigMap, clusterPodService, patroniLeaderService, spec, instanceCertificates, instanceConfigMap, &instance.Spec.Template) } - // Add pgBackRest containers, volumes, etc. 
to the instance Pod spec - if err == nil { - err = addPGBackRestToInstancePodSpec(cluster, &instance.Spec.Template) - } - // Add pgMonitor resources to the instance Pod spec if err == nil { - err = addPGMonitorToInstancePodSpec(cluster, &instance.Spec.Template) + err = addPGMonitorToInstancePodSpec(ctx, cluster, &instance.Spec.Template, exporterQueriesConfig, exporterWebConfig) } - // add the container for the initial copy of the mounted replication client - // certificate files to the /tmp directory and set the proper file permissions - postgres.InitCopyReplicationTLS(cluster, &instance.Spec.Template) - - // add the cluster certificate secret volume to the pod to enable Postgres TLS connections - if err == nil { - err = errors.WithStack(postgres.AddCertVolumeToPod(cluster, &instance.Spec.Template, - naming.ContainerClientCertInit, naming.ContainerDatabase, naming.ContainerClientCertCopy, - primaryCertificate, replicationCertSecretProjection(clusterReplicationSecret))) - } // add nss_wrapper init container and add nss_wrapper env vars to the database and pgbackrest // containers if err == nil { - addNSSWrapper(config.PostgresContainerImage(cluster), + addNSSWrapper( + config.PostgresContainerImage(cluster), + cluster.Spec.ImagePullPolicy, &instance.Spec.Template) } @@ -1064,6 +1223,11 @@ func (r *Reconciler) reconcileInstance( addTMPEmptyDir(&instance.Spec.Template) } + // mount shared memory to the Postgres instance + if err == nil { + addDevSHM(&instance.Spec.Template) + } + if err == nil { err = errors.WithStack(r.apply(ctx, instance)) } @@ -1092,6 +1256,7 @@ func generateInstanceStatefulSetIntent(_ context.Context, naming.LabelCluster: cluster.Name, naming.LabelInstanceSet: spec.Name, naming.LabelInstance: sts.Name, + naming.LabelData: naming.DataPostgres, }) sts.Spec.Selector = &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -1111,6 +1276,7 @@ func generateInstanceStatefulSetIntent(_ context.Context, naming.LabelCluster: cluster.Name, naming.LabelInstanceSet: spec.Name, naming.LabelInstance: sts.Name, + naming.LabelData: naming.DataPostgres, }) // Don't clutter the namespace with extra ControllerRevisions. @@ -1131,6 +1297,18 @@ func generateInstanceStatefulSetIntent(_ context.Context, // Use scheduling constraints from the cluster spec. sts.Spec.Template.Spec.Affinity = spec.Affinity sts.Spec.Template.Spec.Tolerations = spec.Tolerations + sts.Spec.Template.Spec.TopologySpreadConstraints = spec.TopologySpreadConstraints + sts.Spec.Template.Spec.PriorityClassName = initialize.FromPointer(spec.PriorityClassName) + + // if default pod scheduling is not explicitly disabled, add the default + // pod topology spread constraints + if !initialize.FromPointer(cluster.Spec.DisableDefaultPodScheduling) { + sts.Spec.Template.Spec.TopologySpreadConstraints = append( + sts.Spec.Template.Spec.TopologySpreadConstraints, + defaultTopologySpreadConstraints( + naming.ClusterDataForPostgresAndPGBackRest(cluster.Name), + )...) + } // Though we use a StatefulSet to keep an instance running, we only ever // want one Pod from it. This means that Replicas should only ever be @@ -1157,15 +1335,25 @@ func generateInstanceStatefulSetIntent(_ context.Context, // Restart containers any time they stop, die, are killed, etc. 
// - https://docs.k8s.io/concepts/workloads/pods/pod-lifecycle/#restart-policy - sts.Spec.Template.Spec.RestartPolicy = v1.RestartPolicyAlways + sts.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways // ShareProcessNamespace makes Kubernetes' pause process PID 1 and lets // containers see each other's processes. // - https://docs.k8s.io/tasks/configure-pod-container/share-process-namespace/ sts.Spec.Template.Spec.ShareProcessNamespace = initialize.Bool(true) + // Patroni calls the Kubernetes API and pgBackRest may interact with a cloud + // storage provider. Use the instance ServiceAccount and automatically mount + // its Kubernetes credentials. + // - https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity + // - https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html sts.Spec.Template.Spec.ServiceAccountName = instanceServiceAccountName + // Disable environment variables for services other than the Kubernetes API. + // - https://docs.k8s.io/concepts/services-networking/connect-applications-service/#accessing-the-service + // - https://releases.k8s.io/v1.23.0/pkg/kubelet/kubelet_pods.go#L553-L563 + sts.Spec.Template.Spec.EnableServiceLinks = initialize.Bool(false) + sts.Spec.Template.Spec.SecurityContext = postgres.PodSecurityContext(cluster) // Set the image pull secrets, if any exist. @@ -1175,46 +1363,28 @@ func generateInstanceStatefulSetIntent(_ context.Context, sts.Spec.Template.Spec.ImagePullSecrets = cluster.Spec.ImagePullSecrets } -// addPGBackRestToInstancePodSpec adds pgBackRest configuration to the PodTemplateSpec. This -// includes adding an SSH sidecar if a pgBackRest repoHost is enabled per the current -// PostgresCluster spec, mounting pgBackRest repo volumes if a dedicated repository is not -// configured, and then mounting the proper pgBackRest configuration resources (ConfigMaps -// and Secrets) -func addPGBackRestToInstancePodSpec(cluster *v1beta1.PostgresCluster, - template *v1.PodTemplateSpec) error { - - dedicatedRepoEnabled := pgbackrest.DedicatedRepoHostEnabled(cluster) - pgBackRestConfigContainers := []string{naming.ContainerDatabase} - if dedicatedRepoEnabled { - pgBackRestConfigContainers = append(pgBackRestConfigContainers, - naming.PGBackRestRepoContainerName) - var resources v1.ResourceRequirements - if cluster.Spec.Backups.PGBackRest.RepoHost != nil { - resources = cluster.Spec.Backups.PGBackRest.RepoHost.Resources - } - if err := pgbackrest.AddSSHToPod(cluster, template, true, - resources, naming.ContainerDatabase); err != nil { - return errors.WithStack(err) - } - } - if err := pgbackrest.AddConfigsToPod(cluster, template, pgbackrest.CMInstanceKey, - pgBackRestConfigContainers...); err != nil { - return errors.WithStack(err) - } +// addPGBackRestToInstancePodSpec adds pgBackRest configurations and sidecars +// to the PodSpec. +func addPGBackRestToInstancePodSpec( + ctx context.Context, cluster *v1beta1.PostgresCluster, + instanceCertificates *corev1.Secret, instancePod *corev1.PodSpec, +) { + pgbackrest.AddServerToInstancePod(ctx, cluster, instancePod, + instanceCertificates.Name) - return nil + pgbackrest.AddConfigToInstancePod(cluster, instancePod) } -// +kubebuilder:rbac:groups="",resources=configmaps,verbs=create;patch +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={create,patch} // reconcileInstanceConfigMap writes the ConfigMap that contains generated // files (etc) that apply to instance of cluster. 
func (r *Reconciler) reconcileInstanceConfigMap( ctx context.Context, cluster *v1beta1.PostgresCluster, spec *v1beta1.PostgresInstanceSetSpec, instance *appsv1.StatefulSet, -) (*v1.ConfigMap, error) { - instanceConfigMap := &v1.ConfigMap{ObjectMeta: naming.InstanceConfigMap(instance)} - instanceConfigMap.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("ConfigMap")) +) (*corev1.ConfigMap, error) { + instanceConfigMap := &corev1.ConfigMap{ObjectMeta: naming.InstanceConfigMap(instance)} + instanceConfigMap.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) // TODO(cbandy): Instance StatefulSet as owner? err := errors.WithStack(r.setControllerReference(cluster, instanceConfigMap)) @@ -1241,8 +1411,8 @@ func (r *Reconciler) reconcileInstanceConfigMap( return instanceConfigMap, err } -// +kubebuilder:rbac:groups="",resources=secrets,verbs=get -// +kubebuilder:rbac:groups="",resources=secrets,verbs=create;patch +// +kubebuilder:rbac:groups="",resources="secrets",verbs={get} +// +kubebuilder:rbac:groups="",resources="secrets",verbs={create,patch} // reconcileInstanceCertificates writes the Secret that contains certificates // and private keys for instance of cluster. @@ -1250,13 +1420,13 @@ func (r *Reconciler) reconcileInstanceCertificates( ctx context.Context, cluster *v1beta1.PostgresCluster, spec *v1beta1.PostgresInstanceSetSpec, instance *appsv1.StatefulSet, root *pki.RootCertificateAuthority, -) (*v1.Secret, error) { - existing := &v1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} +) (*corev1.Secret, error) { + existing := &corev1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} err := errors.WithStack(client.IgnoreNotFound( r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) - instanceCerts := &v1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} - instanceCerts.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Secret")) + instanceCerts := &corev1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} + instanceCerts.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) // TODO(cbandy): Instance StatefulSet as owner? if err == nil { @@ -1279,7 +1449,7 @@ func (r *Reconciler) reconcileInstanceCertificates( // expects an *unencrypted* private key. We're also adding other values and // other formats, so indicate that with the "Opaque" type. // - https://docs.k8s.io/concepts/configuration/secret/#secret-types - instanceCerts.Type = v1.SecretTypeOpaque + instanceCerts.Type = corev1.SecretTypeOpaque instanceCerts.Data = make(map[string][]byte) var leafCert *pki.LeafCertificate @@ -1292,9 +1462,64 @@ func (r *Reconciler) reconcileInstanceCertificates( root.Certificate, leafCert.Certificate, leafCert.PrivateKey, instanceCerts) } + if err == nil { + err = pgbackrest.InstanceCertificates(ctx, cluster, + root.Certificate, leafCert.Certificate, leafCert.PrivateKey, + instanceCerts) + } if err == nil { err = errors.WithStack(r.apply(ctx, instanceCerts)) } return instanceCerts, err } + +// +kubebuilder:rbac:groups="policy",resources="poddisruptionbudgets",verbs={create,patch,get,delete} + +// reconcileInstanceSetPodDisruptionBudget creates a PDB for an instance set. A +// PDB will be created when the minAvailable is determined to be greater than 0. +// MinAvailable can be defined in the spec or a default value will be set based +// on the number of replicas in the instance set. 
+func (r *Reconciler) reconcileInstanceSetPodDisruptionBudget( + ctx context.Context, + cluster *v1beta1.PostgresCluster, + spec *v1beta1.PostgresInstanceSetSpec, +) error { + if spec.Replicas == nil { + // Replicas should always have a value because of defaults in the spec + return errors.New("Replicas should be defined") + } + minAvailable := getMinAvailable(spec.MinAvailable, *spec.Replicas) + + meta := naming.InstanceSet(cluster, spec) + meta.Labels = naming.Merge(cluster.Spec.Metadata.GetLabelsOrNil(), + spec.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelInstanceSet: spec.Name, + }) + meta.Annotations = naming.Merge(cluster.Spec.Metadata.GetAnnotationsOrNil(), + spec.Metadata.GetAnnotationsOrNil()) + + selector := naming.ClusterInstanceSet(cluster.Name, spec.Name) + pdb, err := r.generatePodDisruptionBudget(cluster, meta, minAvailable, selector) + + // If 'minAvailable' is set to '0', we will not reconcile the PDB. If one + // already exists, we will remove it. + var scaled int + if err == nil { + scaled, err = intstr.GetScaledValueFromIntOrPercent(minAvailable, int(*spec.Replicas), true) + } + if err == nil && scaled <= 0 { + err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(pdb), pdb)) + if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, pdb)) + } + return client.IgnoreNotFound(err) + } + + if err == nil { + err = errors.WithStack(r.apply(ctx, pdb)) + } + return err +} diff --git a/internal/controller/postgrescluster/instance.md b/internal/controller/postgrescluster/instance.md index 0152e7fc80..f0de4c5d7a 100644 --- a/internal/controller/postgrescluster/instance.md +++ b/internal/controller/postgrescluster/instance.md @@ -1,16 +1,7 @@ ## Shutdown and Startup Logic Detail @@ -69,7 +60,7 @@ instance name or set to blank ("") ### Logic Map With this, the grid below shows the expected replica count value, depending on -the the values. Below, the letters represent the following: +the values. Below, the letters represent the following: M = StartupInstance matches the instance name diff --git a/internal/controller/postgrescluster/instance_rollout_test.go b/internal/controller/postgrescluster/instance_rollout_test.go index 0fab7a1536..e668907497 100644 --- a/internal/controller/postgrescluster/instance_rollout_test.go +++ b/internal/controller/postgrescluster/instance_rollout_test.go @@ -1,31 +1,20 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" - "encoding/json" "io" - "io/ioutil" "strings" "testing" - "go.opentelemetry.io/otel/oteltest" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" "gotest.tools/v3/assert" - "gotest.tools/v3/assert/cmp" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -35,6 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -70,11 +60,11 @@ func TestReconcilerRolloutInstance(t *testing.T) { key := client.ObjectKey{Namespace: "ns1", Name: "one-pod-bruh"} reconciler := &Reconciler{} reconciler.Client = fake.NewClientBuilder().WithObjects(instances[0].Pods[0]).Build() - reconciler.Tracer = oteltest.DefaultTracer() + reconciler.Tracer = otel.Tracer(t.Name()) execCalls := 0 reconciler.PodExec = func( - namespace, pod, container string, stdin io.Reader, _, _ io.Writer, command ...string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, _, _ io.Writer, command ...string, ) error { execCalls++ @@ -84,7 +74,7 @@ func TestReconcilerRolloutInstance(t *testing.T) { assert.Equal(t, container, "database") // Checkpoint with timeout. - b, _ := ioutil.ReadAll(stdin) + b, _ := io.ReadAll(stdin) assert.Equal(t, string(b), "SET statement_timeout = :'timeout'; CHECKPOINT;") commandString := strings.Join(command, " ") assert.Assert(t, cmp.Contains(commandString, "psql")) @@ -131,9 +121,9 @@ func TestReconcilerRolloutInstance(t *testing.T) { t.Run("Success", func(t *testing.T) { execCalls := 0 reconciler := &Reconciler{} - reconciler.Tracer = oteltest.DefaultTracer() + reconciler.Tracer = otel.Tracer(t.Name()) reconciler.PodExec = func( - namespace, pod, container string, _ io.Reader, stdout, _ io.Writer, command ...string, + ctx context.Context, namespace, pod, container string, _ io.Reader, stdout, _ io.Writer, command ...string, ) error { execCalls++ @@ -159,9 +149,9 @@ func TestReconcilerRolloutInstance(t *testing.T) { t.Run("Failure", func(t *testing.T) { reconciler := &Reconciler{} - reconciler.Tracer = oteltest.DefaultTracer() + reconciler.Tracer = otel.Tracer(t.Name()) reconciler.PodExec = func( - _, _, _ string, _ io.Reader, _, _ io.Writer, _ ...string, + ctx context.Context, _, _, _ string, _ io.Reader, _, _ io.Writer, _ ...string, ) error { // Nothing useful in stdout. 
return nil @@ -175,24 +165,24 @@ func TestReconcilerRolloutInstance(t *testing.T) { func TestReconcilerRolloutInstances(t *testing.T) { ctx := context.Background() - reconciler := &Reconciler{Tracer: oteltest.DefaultTracer()} + reconciler := &Reconciler{Tracer: otel.Tracer(t.Name())} accumulate := func(on *[]*Instance) func(context.Context, *Instance) error { return func(_ context.Context, i *Instance) error { *on = append(*on, i); return nil } } logSpanAttributes := func(t testing.TB) { - recorder := new(oteltest.StandardSpanRecorder) - provider := oteltest.NewTracerProvider(oteltest.WithSpanRecorder(recorder)) + recorder := tracetest.NewSpanRecorder() + provider := trace.NewTracerProvider(trace.WithSpanProcessor(recorder)) former := reconciler.Tracer - reconciler.Tracer = provider.Tracer("") + reconciler.Tracer = provider.Tracer(t.Name()) t.Cleanup(func() { reconciler.Tracer = former - for _, span := range recorder.Completed() { - b, _ := json.Marshal(span.Attributes()) - t.Log(span.Name(), string(b)) + for _, span := range recorder.Ended() { + attr := attribute.NewSet(span.Attributes()...) + t.Log(span.Name(), attr.Encoded(attribute.DefaultEncoder())) } }) } diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go index ca25e257bb..f7f59f50a5 100644 --- a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -1,47 +1,46 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" "fmt" + "os" "sort" + "strings" "testing" "time" + "github.com/go-logr/logr/funcr" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/pkg/errors" "go.opentelemetry.io/otel" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/pgbackrest" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/events" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -179,7 +178,7 @@ func TestNewObservedInstances(t *testing.T) { // Lookup based on its labels. assert.Equal(t, observed.byName["the-name"], instance) assert.DeepEqual(t, observed.bySet["missing"], []*Instance{instance}) - assert.DeepEqual(t, observed.setNames.List(), []string{"missing"}) + assert.DeepEqual(t, sets.List(observed.setNames), []string{"missing"}) }) t.Run("RunnerMissingOthers", func(t *testing.T) { @@ -212,7 +211,7 @@ func TestNewObservedInstances(t *testing.T) { // Lookup based on its name and labels. assert.Equal(t, observed.byName["the-name"], instance) assert.DeepEqual(t, observed.bySet["missing"], []*Instance{instance}) - assert.DeepEqual(t, observed.setNames.List(), []string{"missing"}) + assert.DeepEqual(t, sets.List(observed.setNames), []string{"missing"}) }) t.Run("Matching", func(t *testing.T) { @@ -257,212 +256,718 @@ func TestNewObservedInstances(t *testing.T) { // Lookup based on its name and labels. 
assert.Equal(t, observed.byName["the-name"], instance) assert.DeepEqual(t, observed.bySet["00"], []*Instance{instance}) - assert.DeepEqual(t, observed.setNames.List(), []string{"00"}) + assert.DeepEqual(t, sets.List(observed.setNames), []string{"00"}) }) } -func TestAddPGBackRestToInstancePodSpec(t *testing.T) { +func TestStoreDesiredRequest(t *testing.T) { + ctx := context.Background() - clusterName := "hippo" - clusterUID := types.UID("hippouid") - namespace := "test-add-pgbackrest-to-instance-pod-spec" + setupLogCapture := func(ctx context.Context) (context.Context, *[]string) { + calls := []string{} + testlog := funcr.NewJSON(func(object string) { + calls = append(calls, object) + }, funcr.Options{ + Verbosity: 1, + }) + return logging.NewContext(ctx, testlog), &calls + } - // create a PostgresCluster to test with - postgresCluster := &v1beta1.PostgresCluster{ + cluster := v1beta1.PostgresCluster{ ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - Namespace: namespace, - UID: clusterUID, + Name: "rhino", + Namespace: "test-namespace", }, Spec: v1beta1.PostgresClusterSpec{ - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{ - Repos: []v1beta1.PGBackRestRepo{{ - Name: "repo1", - Volume: &v1beta1.RepoPVC{ - VolumeClaimSpec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, - Resources: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceStorage: resource.MustParse("1Gi"), - }, - }, + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + Name: "red", + Replicas: initialize.Int32(1), + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }}}, + }, { + Name: "blue", + Replicas: initialize.Int32(1), + }}}} + + t.Run("BadRequestNoBackup", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "red", "woot", "") + + assert.Equal(t, value, "") + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 1) + assert.Assert(t, cmp.Contains((*logs)[0], "Unable to parse pgData volume request from status")) + }) + + t.Run("BadRequestWithBackup", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "red", "foo", "1Gi") + + assert.Equal(t, value, "1Gi") + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 1) + assert.Assert(t, cmp.Contains((*logs)[0], "Unable to parse pgData volume request from status (foo) for rhino/red")) + }) + + t.Run("NoLimitNoEvent", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "blue", "1Gi", "") + + assert.Equal(t, value, "1Gi") + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 0) + }) + + t.Run("BadBackupRequest", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := 
reconciler.storeDesiredRequest(ctx, &cluster, "red", "2Gi", "bar") + + assert.Equal(t, value, "2Gi") + assert.Equal(t, len(*logs), 1) + assert.Assert(t, cmp.Contains((*logs)[0], "Unable to parse pgData volume request from status backup (bar) for rhino/red")) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "VolumeAutoGrow") + assert.Equal(t, recorder.Events[0].Note, "pgData volume expansion to 2Gi requested for rhino/red.") + }) + + t.Run("ValueUpdateWithEvent", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "red", "1Gi", "") + + assert.Equal(t, value, "1Gi") + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "VolumeAutoGrow") + assert.Equal(t, recorder.Events[0].Note, "pgData volume expansion to 1Gi requested for rhino/red.") + }) + + t.Run("NoLimitNoEvent", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "blue", "1Gi", "") + + assert.Equal(t, value, "1Gi") + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 0) + }) +} + +func TestWritablePod(t *testing.T) { + container := "container" + + t.Run("empty observed", func(t *testing.T) { + observed := &observedInstances{} + + pod, instance := observed.writablePod("container") + assert.Assert(t, pod == nil) + assert.Assert(t, instance == nil) + }) + t.Run("terminating", func(t *testing.T) { + instances := []*Instance{ + { + Name: "instance", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "pod", + Annotations: map[string]string{ + "status": `{"role":"master"}`, + }, + DeletionTimestamp: &metav1.Time{}, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{{ + Name: container, + State: corev1.ContainerState{ + Running: new(corev1.ContainerStateRunning), }, + }}, + }, + }}, + Runner: &appsv1.StatefulSet{}, + }, + } + observed := &observedInstances{forCluster: instances} + + terminating, known := observed.forCluster[0].IsTerminating() + assert.Assert(t, terminating && known) + + pod, instance := observed.writablePod("container") + assert.Assert(t, pod == nil) + assert.Assert(t, instance == nil) + }) + t.Run("not running", func(t *testing.T) { + instances := []*Instance{ + { + Name: "instance", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "pod", + Annotations: map[string]string{ + "status": `{"role":"master"}`, }, - }, { - Name: "repo2", - Volume: &v1beta1.RepoPVC{ - VolumeClaimSpec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, - Resources: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceStorage: resource.MustParse("2Gi"), - }, - }, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{{ + Name: container, + State: corev1.ContainerState{ + Waiting: new(corev1.ContainerStateWaiting)}, + }}, + }, + }}, + Runner: &appsv1.StatefulSet{}, + }, + } + observed := &observedInstances{forCluster: instances} + + running, known := 
observed.forCluster[0].IsRunning(container) + assert.Check(t, !running && known) + + pod, instance := observed.writablePod("container") + assert.Assert(t, pod == nil) + assert.Assert(t, instance == nil) + }) + t.Run("not writable", func(t *testing.T) { + instances := []*Instance{ + { + Name: "instance", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "pod", + Annotations: map[string]string{ + "status": `{"role":"replica"}`, + }, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{{ + Name: container, + State: corev1.ContainerState{ + Running: new(corev1.ContainerStateRunning), }, + }}, + }, + }}, + Runner: &appsv1.StatefulSet{}, + }, + } + observed := &observedInstances{forCluster: instances} + + writable, known := observed.forCluster[0].IsWritable() + assert.Check(t, !writable && known) + + pod, instance := observed.writablePod("container") + assert.Assert(t, pod == nil) + assert.Assert(t, instance == nil) + }) + t.Run("writable instance exists", func(t *testing.T) { + instances := []*Instance{ + { + Name: "instance", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "pod", + Annotations: map[string]string{ + "status": `{"role":"master"}`, }, - }}, - }, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{{ + Name: container, + State: corev1.ContainerState{ + Running: new(corev1.ContainerStateRunning), + }, + }}, + }, + }}, + Runner: &appsv1.StatefulSet{}, }, - }, - } + } + observed := &observedInstances{forCluster: instances} + + terminating, known := observed.forCluster[0].IsTerminating() + assert.Check(t, !terminating && known) + writable, known := observed.forCluster[0].IsWritable() + assert.Check(t, writable && known) + running, known := observed.forCluster[0].IsRunning(container) + assert.Check(t, running && known) + + pod, instance := observed.writablePod("container") + assert.Assert(t, pod != nil) + assert.Assert(t, instance != nil) + }) +} - testCases := []struct { - dedicatedRepoHostEnabled bool - sshConfig *v1.ConfigMapProjection - sshSecret *v1.SecretProjection - }{{ - dedicatedRepoHostEnabled: false, - }, { - dedicatedRepoHostEnabled: true, - sshConfig: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "cust-ssh-config.conf"}}, - sshSecret: &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "cust-ssh-secret.conf"}}, - }, { - dedicatedRepoHostEnabled: true, - sshConfig: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "cust-ssh-config.conf"}}, - sshSecret: &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "cust-ssh-secret.conf"}}, - }} +func TestAddPGBackRestToInstancePodSpec(t *testing.T) { + t.Parallel() - for _, tc := range testCases { - dedicated := tc.dedicatedRepoHostEnabled - customConfig := (tc.sshConfig != nil) - customSecret := (tc.sshSecret != nil) - t.Run(fmt.Sprintf("dedicated:%t", dedicated), func(t *testing.T) { - - template := &v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{{Name: naming.ContainerDatabase}}, - }, - } + ctx := context.Background() + cluster := v1beta1.PostgresCluster{} + cluster.Name = "hippo" + cluster.Default() - pgBackRestConfigContainers := []string{naming.ContainerDatabase} - if dedicated { - pgBackRestConfigContainers = append(pgBackRestConfigContainers, - naming.PGBackRestRepoContainerName) - if customConfig || customSecret { - if 
postgresCluster.Spec.Backups.PGBackRest.RepoHost == nil { - postgresCluster.Spec.Backups.PGBackRest.RepoHost = &v1beta1.PGBackRestRepoHost{} - } - postgresCluster.Spec.Backups.PGBackRest.RepoHost.SSHConfiguration = tc.sshConfig - postgresCluster.Spec.Backups.PGBackRest.RepoHost.SSHSecret = tc.sshSecret - } - } + certificates := corev1.Secret{} + certificates.Name = "some-secret" - err := addPGBackRestToInstancePodSpec(postgresCluster, template) - assert.NilError(t, err) + pod := corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "database"}, + {Name: "other"}, + }, + Volumes: []corev1.Volume{ + {Name: "other"}, + {Name: "postgres-data"}, + {Name: "postgres-wal"}, + }, + } - // if a repo host is configured, then verify SSH is enabled - if dedicated { - - // verify the ssh volume - var foundSSHVolume bool - var sshVolume v1.Volume - for _, v := range template.Spec.Volumes { - if v.Name == naming.PGBackRestSSHVolume { - foundSSHVolume = true - sshVolume = v - break - } - } - assert.Assert(t, foundSSHVolume) - - // verify the ssh config and secret - var foundSSHConfigVolume, foundSSHSecretVolume bool - defaultConfigName := naming.PGBackRestSSHConfig(postgresCluster).Name - defaultSecretName := naming.PGBackRestSSHSecret(postgresCluster).Name - for _, s := range sshVolume.Projected.Sources { - if s.ConfigMap != nil { - if (!customConfig && s.ConfigMap.Name == defaultConfigName) || - (customConfig && s.ConfigMap.Name == tc.sshConfig.Name) { - foundSSHConfigVolume = true - } - } else if s.Secret != nil { - if (!customSecret && s.Secret.Name == defaultSecretName) || - (customSecret && s.Secret.Name == tc.sshSecret.Name) { - foundSSHSecretVolume = true - } - } - } - assert.Assert(t, foundSSHConfigVolume) - assert.Assert(t, foundSSHSecretVolume) - - // verify that pgbackrest container is present and that the proper SSH volume mount in - // present in all containers - var foundSSHContainer bool - for _, c := range template.Spec.Containers { - if c.Name == naming.PGBackRestRepoContainerName { - foundSSHContainer = true - } - var foundVolumeMount bool - for _, vm := range c.VolumeMounts { - if vm.Name == naming.PGBackRestSSHVolume && vm.MountPath == "/etc/ssh" && - vm.ReadOnly == true { - foundVolumeMount = true - break - } - } - assert.Assert(t, foundVolumeMount) - } - assert.Assert(t, foundSSHContainer) - } + t.Run("NoVolumeRepo", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = nil + + out := pod.DeepCopy() + addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) + + // Only Containers and Volumes fields have changed. + assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) + + // Only database container has mounts. + // Other containers are ignored. 
+ assert.Assert(t, cmp.MarshalMatches(out.Containers, ` +- name: database + resources: {} + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true +- name: other + resources: {} +- command: + - pgbackrest + - server + livenessProbe: + exec: + command: + - pgbackrest + - server-ping + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + - mountPath: /pgdata + name: postgres-data + - mountPath: /pgwal + name: postgres-wal + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true +- command: + - bash + - -ceu + - -- + - |- + monitor() { + exec {fd}<> <(:||:) + until read -r -t 5 -u "${fd}"; do + if + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --dereference --format='Loaded configuration dated %y' "${filename}" + elif + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] + } && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded certificates dated %y' "${directory}" + fi + done + }; export directory="$1" authority="$2" filename="$3"; export -f monitor; exec -a "$0" bash -ceu monitor + - pgbackrest-config + - /etc/pgbackrest/server + - /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt + - /etc/pgbackrest/conf.d/~postgres-operator_server.conf + name: pgbackrest-config + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + `)) + + // Instance configuration files with certificates. + // Other volumes are ignored. 
+ assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` +- name: other +- name: postgres-data +- name: postgres-wal +- name: pgbackrest-server + projected: + sources: + - secret: + items: + - key: pgbackrest-server.crt + path: server-tls.crt + - key: pgbackrest-server.key + mode: 384 + path: server-tls.key + name: some-secret +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_instance.conf + path: pgbackrest_instance.conf + - key: config-hash + path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf + name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest + `)) + }) - var foundConfigVolume bool - var configVolume v1.Volume - for _, v := range template.Spec.Volumes { - if v.Name == pgbackrest.ConfigVol { - foundConfigVolume = true - configVolume = v - break - } - } - assert.Assert(t, foundConfigVolume) - - var foundConfigProjection bool - defaultConfigName := naming.PGBackRestConfig(postgresCluster).Name - for _, s := range configVolume.Projected.Sources { - if s.ConfigMap != nil { - if s.ConfigMap.Name == defaultConfigName { - foundConfigProjection = true - } - } - } - assert.Assert(t, foundConfigProjection) + t.Run("OneVolumeRepo", func(t *testing.T) { + alwaysExpect := func(t testing.TB, result *corev1.PodSpec) { + // Only Containers and Volumes fields have changed. + assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) + + // Instance configuration files plus client and server certificates. + // The server certificate comes from the instance Secret. + // Other volumes are untouched. + assert.Assert(t, cmp.MarshalMatches(result.Volumes, ` +- name: other +- name: postgres-data +- name: postgres-wal +- name: pgbackrest-server + projected: + sources: + - secret: + items: + - key: pgbackrest-server.crt + path: server-tls.crt + - key: pgbackrest-server.key + mode: 384 + path: server-tls.key + name: some-secret +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_instance.conf + path: pgbackrest_instance.conf + - key: config-hash + path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf + name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest + `)) + } - for _, container := range pgBackRestConfigContainers { - var foundContainer bool - for _, c := range template.Spec.Containers { - if c.Name == container { - foundContainer = true - } - var foundVolumeMount bool - for _, vm := range c.VolumeMounts { - if vm.Name == pgbackrest.ConfigVol && vm.MountPath == pgbackrest.ConfigDir { - foundVolumeMount = true - break - } - } - assert.Assert(t, foundVolumeMount) - } - assert.Assert(t, foundContainer) + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: new(v1beta1.RepoPVC), + }, + } + + out := pod.DeepCopy() + addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) + alwaysExpect(t, out) + + // The TLS server is added and configuration mounted. 
+ // It has PostgreSQL volumes mounted while other volumes are ignored. + assert.Assert(t, cmp.MarshalMatches(out.Containers, ` +- name: database + resources: {} + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true +- name: other + resources: {} +- command: + - pgbackrest + - server + livenessProbe: + exec: + command: + - pgbackrest + - server-ping + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + - mountPath: /pgdata + name: postgres-data + - mountPath: /pgwal + name: postgres-wal + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true +- command: + - bash + - -ceu + - -- + - |- + monitor() { + exec {fd}<> <(:||:) + until read -r -t 5 -u "${fd}"; do + if + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --dereference --format='Loaded configuration dated %y' "${filename}" + elif + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] + } && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded certificates dated %y' "${directory}" + fi + done + }; export directory="$1" authority="$2" filename="$3"; export -f monitor; exec -a "$0" bash -ceu monitor + - pgbackrest-config + - /etc/pgbackrest/server + - /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt + - /etc/pgbackrest/conf.d/~postgres-operator_server.conf + name: pgbackrest-config + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + `)) + + t.Run("CustomResources", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Sidecars = &v1beta1.PGBackRestSidecars{ + PGBackRest: &v1beta1.Sidecar{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("5m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("9Mi"), + }, + }, + }, } + + before := out.DeepCopy() + out := pod.DeepCopy() + addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) + alwaysExpect(t, out) + + // Only the TLS server container changed. + assert.Equal(t, len(before.Containers), len(out.Containers)) + assert.Assert(t, len(before.Containers) > 2) + assert.DeepEqual(t, before.Containers[:2], out.Containers[:2]) + + // It has the custom resources. 
+ assert.Assert(t, cmp.MarshalMatches(out.Containers[2:], ` +- command: + - pgbackrest + - server + livenessProbe: + exec: + command: + - pgbackrest + - server-ping + name: pgbackrest + resources: + limits: + memory: 9Mi + requests: + cpu: 5m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + - mountPath: /pgdata + name: postgres-data + - mountPath: /pgwal + name: postgres-wal + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true +- command: + - bash + - -ceu + - -- + - |- + monitor() { + exec {fd}<> <(:||:) + until read -r -t 5 -u "${fd}"; do + if + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --dereference --format='Loaded configuration dated %y' "${filename}" + elif + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] + } && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded certificates dated %y' "${directory}" + fi + done + }; export directory="$1" authority="$2" filename="$3"; export -f monitor; exec -a "$0" bash -ceu monitor + - pgbackrest-config + - /etc/pgbackrest/server + - /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt + - /etc/pgbackrest/conf.d/~postgres-operator_server.conf + name: pgbackrest-config + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + `)) }) - } + }) + } func TestPodsToKeep(t *testing.T) { for _, test := range []struct { name string - instances []v1.Pod + instances []corev1.Pod want map[string]int - checks func(*testing.T, []v1.Pod) + checks func(*testing.T, []corev1.Pod) }{ { name: "RemoveSetWithMasterOnly", - instances: []v1.Pod{ + instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ Name: "daisy-asdf", @@ -474,12 +979,12 @@ func TestPodsToKeep(t *testing.T) { }, }, want: map[string]int{}, - checks: func(t *testing.T, p []v1.Pod) { + checks: func(t *testing.T, p []corev1.Pod) { assert.Equal(t, len(p), 0) }, }, { name: "RemoveSetWithReplicaOnly", - instances: []v1.Pod{ + instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ Name: "daisy-asdf", @@ -491,12 +996,12 @@ func TestPodsToKeep(t *testing.T) { }, }, want: map[string]int{}, - checks: func(t *testing.T, p []v1.Pod) { + checks: func(t *testing.T, p []corev1.Pod) { assert.Equal(t, len(p), 0) }, }, { name: "KeepMasterOnly", - instances: []v1.Pod{ + instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ Name: "daisy-asdf", @@ -510,12 +1015,12 @@ func TestPodsToKeep(t *testing.T) { want: map[string]int{ "daisy": 1, }, - checks: func(t *testing.T, p []v1.Pod) { + checks: func(t *testing.T, p []corev1.Pod) { assert.Equal(t, len(p), 1) }, }, { name: "KeepNoRoleLabels", - instances: []v1.Pod{ + instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ Name: "daisy-asdf", @@ -528,12 +1033,12 @@ func TestPodsToKeep(t *testing.T) { want: map[string]int{ "daisy": 1, }, - checks: func(t 
*testing.T, p []v1.Pod) { + checks: func(t *testing.T, p []corev1.Pod) { assert.Equal(t, len(p), 1) }, }, { name: "RemoveSetWithNoRoleLabels", - instances: []v1.Pod{ + instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ Name: "daisy-asdf", @@ -544,12 +1049,12 @@ func TestPodsToKeep(t *testing.T) { }, }, want: map[string]int{}, - checks: func(t *testing.T, p []v1.Pod) { + checks: func(t *testing.T, p []corev1.Pod) { assert.Equal(t, len(p), 0) }, }, { name: "KeepUnknownRoleLabel", - instances: []v1.Pod{ + instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ Name: "daisy-asdf", @@ -563,12 +1068,12 @@ func TestPodsToKeep(t *testing.T) { want: map[string]int{ "daisy": 1, }, - checks: func(t *testing.T, p []v1.Pod) { + checks: func(t *testing.T, p []corev1.Pod) { assert.Equal(t, len(p), 1) }, }, { name: "RemoveSetWithUnknownRoleLabel", - instances: []v1.Pod{ + instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ Name: "daisy-asdf", @@ -580,12 +1085,12 @@ func TestPodsToKeep(t *testing.T) { }, }, want: map[string]int{}, - checks: func(t *testing.T, p []v1.Pod) { + checks: func(t *testing.T, p []corev1.Pod) { assert.Equal(t, len(p), 0) }, }, { name: "MasterLastInSet", - instances: []v1.Pod{ + instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ Name: "daisy-asdf", @@ -608,13 +1113,13 @@ func TestPodsToKeep(t *testing.T) { want: map[string]int{ "daisy": 1, }, - checks: func(t *testing.T, p []v1.Pod) { + checks: func(t *testing.T, p []corev1.Pod) { assert.Equal(t, len(p), 1) assert.Equal(t, p[0].Labels[naming.LabelRole], "master") }, }, { name: "ScaleDownSetWithMaster", - instances: []v1.Pod{ + instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ Name: "max-asdf", @@ -656,7 +1161,7 @@ func TestPodsToKeep(t *testing.T) { "max": 1, "daisy": 1, }, - checks: func(t *testing.T, p []v1.Pod) { + checks: func(t *testing.T, p []corev1.Pod) { assert.Equal(t, len(p), 2) assert.Equal(t, p[0].Labels[naming.LabelRole], "master") assert.Equal(t, p[0].Labels[naming.LabelInstanceSet], "daisy") @@ -665,7 +1170,7 @@ func TestPodsToKeep(t *testing.T) { }, }, { name: "ScaleDownSetWithoutMaster", - instances: []v1.Pod{ + instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ Name: "max-asdf", @@ -707,7 +1212,7 @@ func TestPodsToKeep(t *testing.T) { "max": 1, "daisy": 2, }, - checks: func(t *testing.T, p []v1.Pod) { + checks: func(t *testing.T, p []corev1.Pod) { assert.Equal(t, len(p), 3) assert.Equal(t, p[0].Labels[naming.LabelRole], "master") assert.Equal(t, p[0].Labels[naming.LabelInstanceSet], "max") @@ -718,7 +1223,7 @@ func TestPodsToKeep(t *testing.T) { }, }, { name: "ScaleMasterSetToZero", - instances: []v1.Pod{ + instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ Name: "max-asdf", @@ -751,7 +1256,7 @@ func TestPodsToKeep(t *testing.T) { "max": 0, "daisy": 2, }, - checks: func(t *testing.T, p []v1.Pod) { + checks: func(t *testing.T, p []corev1.Pod) { assert.Equal(t, len(p), 2) assert.Equal(t, p[0].Labels[naming.LabelRole], "replica") assert.Equal(t, p[0].Labels[naming.LabelInstanceSet], "daisy") @@ -760,7 +1265,7 @@ func TestPodsToKeep(t *testing.T) { }, }, { name: "RemoveMasterInstanceSet", - instances: []v1.Pod{ + instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ Name: "max-asdf", @@ -801,7 +1306,7 @@ func TestPodsToKeep(t *testing.T) { want: map[string]int{ "daisy": 3, }, - checks: func(t *testing.T, p []v1.Pod) { + checks: func(t *testing.T, p []corev1.Pod) { assert.Equal(t, len(p), 3) assert.Equal(t, p[0].Labels[naming.LabelRole], "replica") assert.Equal(t, 
p[0].Labels[naming.LabelInstanceSet], "daisy") @@ -823,29 +1328,24 @@ func TestPodsToKeep(t *testing.T) { } func TestDeleteInstance(t *testing.T) { - env, cc, config := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) - - reconciler := &Reconciler{} - ctx, cancel := setupManager(t, config, func(mgr manager.Manager) { - reconciler = &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(t.Name()), - } - }) - t.Cleanup(func() { teardownManager(cancel, t) }) + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("FLAKE: other controllers (PVC, STS) update objects causing conflicts when we deleteControlled") + } - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - ns.Labels = map[string]string{"postgres-operator-test": t.Name()} - assert.NilError(t, reconciler.Client.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, reconciler.Client.Delete(ctx, ns)) }) + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + reconciler := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + Recorder: new(record.FakeRecorder), + Tracer: otel.Tracer(t.Name()), + } // Define, Create, and Reconcile a cluster to get an instance running in kube cluster := testCluster() - cluster.Namespace = ns.Name + cluster.Namespace = setupNamespace(t, cc).Name assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) t.Cleanup(func() { @@ -893,8 +1393,9 @@ func TestDeleteInstance(t *testing.T) { for _, gvk := range gvks { t.Run(gvk.Kind, func(t *testing.T) { - uList := &unstructured.UnstructuredList{} - err := wait.Poll(time.Second*3, Scale(time.Second*30), func() (bool, error) { + ctx := context.Background() + err := wait.PollUntilContextTimeout(ctx, time.Second*3, Scale(time.Second*30), false, func(ctx context.Context) (bool, error) { + uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) assert.NilError(t, errors.WithStack(reconciler.Client.List(ctx, uList, client.InNamespace(cluster.Namespace), @@ -969,12 +1470,22 @@ func TestGenerateInstanceStatefulSetIntent(t *testing.T) { name: "custom tolerations", ip: intentParams{ spec: &v1beta1.PostgresInstanceSetSpec{ - Tolerations: []v1.Toleration{}, + Tolerations: []corev1.Toleration{}, }, }, run: func(t *testing.T, ss *appsv1.StatefulSet) { assert.Assert(t, ss.Spec.Template.Spec.Tolerations != nil) }, + }, { + name: "custom topology spread constraints", + ip: intentParams{ + spec: &v1beta1.PostgresInstanceSetSpec{ + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{}, + }, + }, + run: func(t *testing.T, ss *appsv1.StatefulSet) { + assert.Assert(t, ss.Spec.Template.Spec.TopologySpreadConstraints != nil) + }, }, { name: "shutdown replica", ip: intentParams{ @@ -1039,7 +1550,167 @@ func TestGenerateInstanceStatefulSetIntent(t *testing.T) { assert.Equal(t, ss.Spec.Template.Spec.ImagePullSecrets[0].Name, "myImagePullSecret") }, + }, { + name: "check pod priority", + ip: intentParams{ + spec: &v1beta1.PostgresInstanceSetSpec{ + PriorityClassName: initialize.String("some-priority-class"), + }, + }, + run: func(t *testing.T, ss *appsv1.StatefulSet) { + assert.Equal(t, ss.Spec.Template.Spec.PriorityClassName, + "some-priority-class") + }, + }, { + name: "check default scheduling constraints are added", + run: func(t *testing.T, ss *appsv1.StatefulSet) { + assert.Equal(t, len(ss.Spec.Template.Spec.TopologySpreadConstraints), 2) + 
assert.Assert(t, cmp.MarshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, ` +- labelSelector: + matchExpressions: + - key: postgres-operator.crunchydata.com/data + operator: In + values: + - postgres + - pgbackrest + matchLabels: + postgres-operator.crunchydata.com/cluster: hippo + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway +- labelSelector: + matchExpressions: + - key: postgres-operator.crunchydata.com/data + operator: In + values: + - postgres + - pgbackrest + matchLabels: + postgres-operator.crunchydata.com/cluster: hippo + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + `)) + }, + }, { + name: "check default scheduling constraints are appended to existing", + ip: intentParams{ + spec: &v1beta1.PostgresInstanceSetSpec{ + Name: "instance1", + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{{ + MaxSkew: int32(1), + TopologyKey: "kubernetes.io/hostname", + WhenUnsatisfiable: corev1.ScheduleAnyway, + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: naming.LabelCluster, Operator: "In", Values: []string{"somename"}}, + {Key: naming.LabelData, Operator: "Exists"}, + }, + }, + }}, + }, + }, + run: func(t *testing.T, ss *appsv1.StatefulSet) { + assert.Equal(t, len(ss.Spec.Template.Spec.TopologySpreadConstraints), 3) + assert.Assert(t, cmp.MarshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, ` +- labelSelector: + matchExpressions: + - key: postgres-operator.crunchydata.com/cluster + operator: In + values: + - somename + - key: postgres-operator.crunchydata.com/data + operator: Exists + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway +- labelSelector: + matchExpressions: + - key: postgres-operator.crunchydata.com/data + operator: In + values: + - postgres + - pgbackrest + matchLabels: + postgres-operator.crunchydata.com/cluster: hippo + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway +- labelSelector: + matchExpressions: + - key: postgres-operator.crunchydata.com/data + operator: In + values: + - postgres + - pgbackrest + matchLabels: + postgres-operator.crunchydata.com/cluster: hippo + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + `)) + }, + }, { + name: "check defined constraint when defaults disabled", + ip: intentParams{ + cluster: &v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hippo", + }, + Spec: v1beta1.PostgresClusterSpec{ + PostgresVersion: 13, + DisableDefaultPodScheduling: initialize.Bool(true), + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + Name: "instance1", + Replicas: initialize.Int32(1), + DataVolumeClaimSpec: testVolumeClaimSpec(), + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{{ + MaxSkew: int32(1), + TopologyKey: "kubernetes.io/hostname", + WhenUnsatisfiable: corev1.ScheduleAnyway, + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: naming.LabelCluster, Operator: "In", Values: []string{"somename"}}, + {Key: naming.LabelData, Operator: "Exists"}, + }, + }, + }}, + }}, + }, + }, + spec: &v1beta1.PostgresInstanceSetSpec{ + Name: "instance1", + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{{ + MaxSkew: int32(1), + TopologyKey: "kubernetes.io/hostname", + WhenUnsatisfiable: corev1.ScheduleAnyway, + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: 
[]metav1.LabelSelectorRequirement{ + {Key: naming.LabelCluster, Operator: "In", Values: []string{"somename"}}, + {Key: naming.LabelData, Operator: "Exists"}, + }, + }, + }}, + }, + }, + run: func(t *testing.T, ss *appsv1.StatefulSet) { + assert.Equal(t, len(ss.Spec.Template.Spec.TopologySpreadConstraints), 1) + assert.Assert(t, cmp.MarshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, + `- labelSelector: + matchExpressions: + - key: postgres-operator.crunchydata.com/cluster + operator: In + values: + - somename + - key: postgres-operator.crunchydata.com/data + operator: Exists + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway +`)) + }, }} { + test := test t.Run(test.name, func(t *testing.T) { cluster := test.ip.cluster @@ -1075,6 +1746,10 @@ func TestGenerateInstanceStatefulSetIntent(t *testing.T) { ) test.run(t, sts) + + if assert.Check(t, sts.Spec.Template.Spec.EnableServiceLinks != nil) { + assert.Equal(t, *sts.Spec.Template.Spec.EnableServiceLinks, false) + } }) } } @@ -1084,7 +1759,7 @@ func TestFindAvailableInstanceNames(t *testing.T) { testCases := []struct { set v1beta1.PostgresInstanceSetSpec fakeObservedInstances *observedInstances - fakeClusterVolumes []v1.PersistentVolumeClaim + fakeClusterVolumes []corev1.PersistentVolumeClaim expectedInstanceNames []string }{{ set: v1beta1.PostgresInstanceSetSpec{Name: "instance1"}, @@ -1093,9 +1768,9 @@ func TestFindAvailableInstanceNames(t *testing.T) { InstanceSets: []v1beta1.PostgresInstanceSetSpec{{}}, }}, []appsv1.StatefulSet{{}}, - []v1.Pod{}, + []corev1.Pod{}, ), - fakeClusterVolumes: []v1.PersistentVolumeClaim{{}}, + fakeClusterVolumes: []corev1.PersistentVolumeClaim{{}}, expectedInstanceNames: []string{}, }, { set: v1beta1.PostgresInstanceSetSpec{Name: "instance1"}, @@ -1107,9 +1782,9 @@ func TestFindAvailableInstanceNames(t *testing.T) { Name: "instance1-abc", Labels: map[string]string{ naming.LabelInstanceSet: "instance1"}}}}, - []v1.Pod{}, + []corev1.Pod{}, ), - fakeClusterVolumes: []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{ + fakeClusterVolumes: []corev1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{ Name: "instance1-abc-def", Labels: map[string]string{ naming.LabelRole: naming.RolePostgresData, @@ -1126,9 +1801,9 @@ func TestFindAvailableInstanceNames(t *testing.T) { Name: "instance1-abc", Labels: map[string]string{ naming.LabelInstanceSet: "instance1"}}}}, - []v1.Pod{}, + []corev1.Pod{}, ), - fakeClusterVolumes: []v1.PersistentVolumeClaim{}, + fakeClusterVolumes: []corev1.PersistentVolumeClaim{}, expectedInstanceNames: []string{}, }, { set: v1beta1.PostgresInstanceSetSpec{Name: "instance1"}, @@ -1140,9 +1815,9 @@ func TestFindAvailableInstanceNames(t *testing.T) { Name: "instance1-abc", Labels: map[string]string{ naming.LabelInstanceSet: "instance1"}}}}, - []v1.Pod{}, + []corev1.Pod{}, ), - fakeClusterVolumes: []v1.PersistentVolumeClaim{ + fakeClusterVolumes: []corev1.PersistentVolumeClaim{ {ObjectMeta: metav1.ObjectMeta{ Name: "instance1-abc-def", Labels: map[string]string{ @@ -1167,9 +1842,9 @@ func TestFindAvailableInstanceNames(t *testing.T) { Name: "instance1-abc", Labels: map[string]string{ naming.LabelInstanceSet: "instance1"}}}}, - []v1.Pod{}, + []corev1.Pod{}, ), - fakeClusterVolumes: []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{ + fakeClusterVolumes: []corev1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{ Name: "instance1-abc-def", Labels: map[string]string{ naming.LabelRole: naming.RolePostgresData, @@ -1178,7 +1853,7 @@ func 
TestFindAvailableInstanceNames(t *testing.T) { expectedInstanceNames: []string{"instance1-def"}, }, { set: v1beta1.PostgresInstanceSetSpec{Name: "instance1", - WALVolumeClaimSpec: &v1.PersistentVolumeClaimSpec{}}, + WALVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{}}, fakeObservedInstances: newObservedInstances( &v1beta1.PostgresCluster{Spec: v1beta1.PostgresClusterSpec{ InstanceSets: []v1beta1.PostgresInstanceSetSpec{{Name: "instance1"}}, @@ -1187,9 +1862,9 @@ func TestFindAvailableInstanceNames(t *testing.T) { Name: "instance1-abc", Labels: map[string]string{ naming.LabelInstanceSet: "instance1"}}}}, - []v1.Pod{}, + []corev1.Pod{}, ), - fakeClusterVolumes: []v1.PersistentVolumeClaim{ + fakeClusterVolumes: []corev1.PersistentVolumeClaim{ {ObjectMeta: metav1.ObjectMeta{ Name: "instance1-abc-def", Labels: map[string]string{ @@ -1205,15 +1880,15 @@ func TestFindAvailableInstanceNames(t *testing.T) { expectedInstanceNames: []string{}, }, { set: v1beta1.PostgresInstanceSetSpec{Name: "instance1", - WALVolumeClaimSpec: &v1.PersistentVolumeClaimSpec{}}, + WALVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{}}, fakeObservedInstances: newObservedInstances( &v1beta1.PostgresCluster{Spec: v1beta1.PostgresClusterSpec{ InstanceSets: []v1beta1.PostgresInstanceSetSpec{{Name: "instance1"}}, }}, []appsv1.StatefulSet{}, - []v1.Pod{}, + []corev1.Pod{}, ), - fakeClusterVolumes: []v1.PersistentVolumeClaim{ + fakeClusterVolumes: []corev1.PersistentVolumeClaim{ {ObjectMeta: metav1.ObjectMeta{ Name: "instance1-def-ghi", Labels: map[string]string{ @@ -1229,15 +1904,15 @@ func TestFindAvailableInstanceNames(t *testing.T) { expectedInstanceNames: []string{"instance1-def"}, }, { set: v1beta1.PostgresInstanceSetSpec{Name: "instance1", - WALVolumeClaimSpec: &v1.PersistentVolumeClaimSpec{}}, + WALVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{}}, fakeObservedInstances: newObservedInstances( &v1beta1.PostgresCluster{Spec: v1beta1.PostgresClusterSpec{ InstanceSets: []v1beta1.PostgresInstanceSetSpec{{Name: "instance1"}}, }}, []appsv1.StatefulSet{}, - []v1.Pod{}, + []corev1.Pod{}, ), - fakeClusterVolumes: []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{ + fakeClusterVolumes: []corev1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{ Name: "instance1-def-ghi", Labels: map[string]string{ naming.LabelRole: naming.RolePostgresData, @@ -1260,3 +1935,218 @@ func TestFindAvailableInstanceNames(t *testing.T) { }) } } + +func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + + foundPDB := func( + cluster *v1beta1.PostgresCluster, + spec *v1beta1.PostgresInstanceSetSpec, + ) bool { + got := &policyv1.PodDisruptionBudget{} + err := r.Client.Get(ctx, + naming.AsObjectKey(naming.InstanceSet(cluster, spec)), + got) + return !apierrors.IsNotFound(err) + + } + + ns := setupNamespace(t, cc) + + t.Run("empty", func(t *testing.T) { + cluster := &v1beta1.PostgresCluster{} + spec := &v1beta1.PostgresInstanceSetSpec{} + + assert.Error(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec), + "Replicas should be defined") + }) + + t.Run("not created", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns.Name + spec := &cluster.Spec.InstanceSets[0] + spec.MinAvailable = initialize.Pointer(intstr.FromInt32(0)) + assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) + assert.Assert(t, 
!foundPDB(cluster, spec)) + }) + + t.Run("int created", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns.Name + spec := &cluster.Spec.InstanceSets[0] + spec.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) + + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) + assert.Assert(t, foundPDB(cluster, spec)) + + t.Run("deleted", func(t *testing.T) { + spec.MinAvailable = initialize.Pointer(intstr.FromInt32(0)) + err := r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec) + if apierrors.IsConflict(err) { + // When running in an existing environment another controller will sometimes update + // the object. This leads to an error where the ResourceVersion of the object does + // not match what we expect. When we run into this conflict, try to reconcile the + // object again. + err = r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec) + } + assert.NilError(t, err, errors.Unwrap(err)) + assert.Assert(t, !foundPDB(cluster, spec)) + }) + }) + + t.Run("str created", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns.Name + spec := &cluster.Spec.InstanceSets[0] + spec.MinAvailable = initialize.Pointer(intstr.FromString("50%")) + + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) + assert.Assert(t, foundPDB(cluster, spec)) + + t.Run("deleted", func(t *testing.T) { + spec.MinAvailable = initialize.Pointer(intstr.FromString("0%")) + err := r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec) + if apierrors.IsConflict(err) { + // When running in an existing environment another controller will sometimes update + // the object. This leads to an error where the ResourceVersion of the object does + // not match what we expect. When we run into this conflict, try to reconcile the + // object again. + err = r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec) + } + assert.NilError(t, err, errors.Unwrap(err)) + assert.Assert(t, !foundPDB(cluster, spec)) + }) + + t.Run("delete with 00%", func(t *testing.T) { + spec.MinAvailable = initialize.Pointer(intstr.FromString("50%")) + + assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) + assert.Assert(t, foundPDB(cluster, spec)) + + t.Run("deleted", func(t *testing.T) { + spec.MinAvailable = initialize.Pointer(intstr.FromString("00%")) + err := r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec) + if apierrors.IsConflict(err) { + // When running in an existing environment another controller will sometimes update + // the object. This leads to an error where the ResourceVersion of the object does + // not match what we expect. When we run into this conflict, try to reconcile the + // object again. 
+ t.Log("conflict:", err) + err = r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec) + } + assert.NilError(t, err, "\n%#v", errors.Unwrap(err)) + assert.Assert(t, !foundPDB(cluster, spec)) + }) + }) + }) +} + +func TestCleanupDisruptionBudgets(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + + ns := setupNamespace(t, cc) + + generatePDB := func( + t *testing.T, + cluster *v1beta1.PostgresCluster, + spec *v1beta1.PostgresInstanceSetSpec, + minAvailable *intstr.IntOrString, + ) *policyv1.PodDisruptionBudget { + meta := naming.InstanceSet(cluster, spec) + meta.Labels = map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelInstanceSet: spec.Name, + } + pdb, err := r.generatePodDisruptionBudget( + cluster, + meta, + minAvailable, + naming.ClusterInstanceSet(cluster.Name, spec.Name), + ) + assert.NilError(t, err) + return pdb + } + + createPDB := func( + pdb *policyv1.PodDisruptionBudget, + ) error { + return r.Client.Create(ctx, pdb) + } + + foundPDB := func( + pdb *policyv1.PodDisruptionBudget, + ) bool { + return !apierrors.IsNotFound( + r.Client.Get(ctx, client.ObjectKeyFromObject(pdb), + &policyv1.PodDisruptionBudget{})) + } + + t.Run("pdbs not found", func(t *testing.T) { + cluster := testCluster() + assert.NilError(t, r.cleanupPodDisruptionBudgets(ctx, cluster)) + }) + + t.Run("pdbs found", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns.Name + spec := &cluster.Spec.InstanceSets[0] + spec.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) + + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + expectedPDB := generatePDB(t, cluster, spec, + initialize.Pointer(intstr.FromInt32(1))) + assert.NilError(t, createPDB(expectedPDB)) + + t.Run("no instances were removed", func(t *testing.T) { + assert.Assert(t, foundPDB(expectedPDB)) + assert.NilError(t, r.cleanupPodDisruptionBudgets(ctx, cluster)) + assert.Assert(t, foundPDB(expectedPDB)) + }) + + t.Run("cleanup leftover pdb", func(t *testing.T) { + leftoverPDB := generatePDB(t, cluster, &v1beta1.PostgresInstanceSetSpec{ + Name: "old-instance", + Replicas: initialize.Int32(1), + }, initialize.Pointer(intstr.FromInt32(1))) + assert.NilError(t, createPDB(leftoverPDB)) + + assert.Assert(t, foundPDB(expectedPDB)) + assert.Assert(t, foundPDB(leftoverPDB)) + err := r.cleanupPodDisruptionBudgets(ctx, cluster) + + // The disruption controller updates the status of a PDB any time a + // related Pod changes. When this happens, the resourceVersion of + // the PDB does not match what we expect and we get a conflict. Retry. + if apierrors.IsConflict(err) { + t.Log("conflict:", err) + err = r.cleanupPodDisruptionBudgets(ctx, cluster) + } + + assert.NilError(t, err, "\n%#v", errors.Unwrap(err)) + assert.Assert(t, foundPDB(expectedPDB)) + assert.Assert(t, !foundPDB(leftoverPDB)) + }) + }) +} diff --git a/internal/controller/postgrescluster/patroni.go b/internal/controller/postgrescluster/patroni.go index e90df310f5..1c5ac93eed 100644 --- a/internal/controller/postgrescluster/patroni.go +++ b/internal/controller/postgrescluster/patroni.go @@ -1,34 +1,22 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" + "fmt" "io" "time" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/patroni" @@ -37,7 +25,7 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// +kubebuilder:rbac:groups="",resources=endpoints,verbs=deletecollection +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={deletecollection} func (r *Reconciler) deletePatroniArtifacts( ctx context.Context, cluster *v1beta1.PostgresCluster, @@ -49,7 +37,7 @@ func (r *Reconciler) deletePatroniArtifacts( selector, err := naming.AsSelector(naming.ClusterPatronis(cluster)) if err == nil { err = errors.WithStack( - r.Client.DeleteAllOf(ctx, &v1.Endpoints{}, + r.Client.DeleteAllOf(ctx, &corev1.Endpoints{}, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) @@ -58,7 +46,87 @@ func (r *Reconciler) deletePatroniArtifacts( return err } -// +kubebuilder:rbac:groups="",resources=services,verbs=create;patch +func (r *Reconciler) handlePatroniRestarts( + ctx context.Context, cluster *v1beta1.PostgresCluster, instances *observedInstances, +) error { + const container = naming.ContainerDatabase + var primaryNeedsRestart, replicaNeedsRestart *Instance + + // Look for one primary and one replica that need to restart. Ignore + // containers that are terminating or not running; Kubernetes will start + // them again, and calls to their Patroni API will likely be interrupted anyway. + for _, instance := range instances.forCluster { + if len(instance.Pods) > 0 && patroni.PodRequiresRestart(instance.Pods[0]) { + if terminating, known := instance.IsTerminating(); terminating || !known { + continue + } + if running, known := instance.IsRunning(container); !running || !known { + continue + } + + if primary, _ := instance.IsPrimary(); primary { + primaryNeedsRestart = instance + } else { + replicaNeedsRestart = instance + } + if primaryNeedsRestart != nil && replicaNeedsRestart != nil { + break + } + } + } + + // When the primary instance needs to restart, restart it and return early. + // Some PostgreSQL settings must be changed on the primary before any + // progress can be made on the replicas, e.g. decreasing "max_connections". + // Another reconcile will trigger when an instance with pending restarts + // updates its status in DCS. See [Reconciler.watchPods]. + // + // NOTE: In Patroni v2.1.1, regardless of the PostgreSQL parameter, the + // primary indicates it needs to restart one "loop_wait" *after* the + // replicas indicate it. 
So, even though we consider the primary ahead of + // replicas here, replicas will typically restart first because we see them + // first. + if primaryNeedsRestart != nil { + exec := patroni.Executor(func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + pod := primaryNeedsRestart.Pods[0] + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + }) + + return errors.WithStack(exec.RestartPendingMembers(ctx, "master", naming.PatroniScope(cluster))) + } + + // When the primary does not need to restart but a replica does, restart all + // replicas that still need it. + // + // NOTE: This does not always clear the "needs restart" indicator on a replica. + // Patroni sets that when a parameter must be increased to match the minimum + // required of data on disk. When that happens, restarts occur (i.e. downtime) + // but the affected parameter cannot change until the replica has replayed + // the new minimum from the primary, e.g. decreasing "max_connections". + // - https://github.com/zalando/patroni/blob/v2.1.1/patroni/postgresql/config.py#L1069 + // + // TODO(cbandy): The above could interact badly with delayed replication. + // When we offer per-instance PostgreSQL configuration, we may need to revisit + // how we decide when to restart. + // - https://www.postgresql.org/docs/current/runtime-config-replication.html + if replicaNeedsRestart != nil { + exec := patroni.Executor(func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + pod := replicaNeedsRestart.Pods[0] + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + }) + + return errors.WithStack(exec.RestartPendingMembers(ctx, "replica", naming.PatroniScope(cluster))) + } + + // Nothing needs to restart. + return nil +} + +// +kubebuilder:rbac:groups="",resources="services",verbs={create,patch} // reconcilePatroniDistributedConfiguration sets labels and ownership on the // objects Patroni creates for its distributed configuration. @@ -72,8 +140,8 @@ func (r *Reconciler) reconcilePatroniDistributedConfiguration( // - https://releases.k8s.io/v1.16.0/pkg/controller/endpoint/endpoints_controller.go#L547 // - https://releases.k8s.io/v1.20.0/pkg/controller/endpoint/endpoints_controller.go#L580 // - https://github.com/zalando/patroni/blob/v2.0.1/patroni/dcs/kubernetes.py#L865-L881 - dcsService := &v1.Service{ObjectMeta: naming.PatroniDistributedConfiguration(cluster)} - dcsService.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Service")) + dcsService := &corev1.Service{ObjectMeta: naming.PatroniDistributedConfiguration(cluster)} + dcsService.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) err := errors.WithStack(r.setControllerReference(cluster, dcsService)) @@ -88,7 +156,7 @@ func (r *Reconciler) reconcilePatroniDistributedConfiguration( // Allocate no IP address (headless) and create no Endpoints. 
// - https://docs.k8s.io/concepts/services-networking/service/#headless-services - dcsService.Spec.ClusterIP = v1.ClusterIPNone + dcsService.Spec.ClusterIP = corev1.ClusterIPNone dcsService.Spec.Selector = nil if err == nil { @@ -101,7 +169,7 @@ func (r *Reconciler) reconcilePatroniDistributedConfiguration( return err } -// +kubebuilder:rbac:resources=pods,verbs=get;list +// +kubebuilder:rbac:resources="pods",verbs={get,list} func (r *Reconciler) reconcilePatroniDynamicConfiguration( ctx context.Context, cluster *v1beta1.PostgresCluster, instances *observedInstances, @@ -113,7 +181,7 @@ func (r *Reconciler) reconcilePatroniDynamicConfiguration( return nil } - var pod *v1.Pod + var pod *corev1.Pod for _, instance := range instances.forCluster { if terminating, known := instance.IsTerminating(); !terminating && known { running, known := instance.IsRunning(naming.ContainerDatabase) @@ -132,17 +200,14 @@ func (r *Reconciler) reconcilePatroniDynamicConfiguration( // NOTE(cbandy): Despite the guards above, calling PodExec may still fail // due to a missing or stopped container. - exec := func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + exec := func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { + return r.PodExec(ctx, pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) } - // Deserialize the schemaless field. There will be no error because the - // Kubernetes API has already ensured it is a JSON object. - configuration := make(map[string]interface{}) - _ = yaml.Unmarshal( - cluster.Spec.Patroni.DynamicConfiguration.Raw, &configuration, - ) - + var configuration map[string]any + if cluster.Spec.Patroni != nil { + configuration = cluster.Spec.Patroni.DynamicConfiguration + } configuration = patroni.DynamicConfiguration(cluster, configuration, pgHBAs, pgParameters) return errors.WithStack( @@ -160,7 +225,17 @@ func (r *Reconciler) generatePatroniLeaderLeaseService( service.Annotations = naming.Merge( cluster.Spec.Metadata.GetAnnotationsOrNil()) service.Labels = naming.Merge( - cluster.Spec.Metadata.GetLabelsOrNil(), + cluster.Spec.Metadata.GetLabelsOrNil()) + + if spec := cluster.Spec.Service; spec != nil { + service.Annotations = naming.Merge(service.Annotations, + spec.Metadata.GetAnnotationsOrNil()) + service.Labels = naming.Merge(service.Labels, + spec.Metadata.GetLabelsOrNil()) + } + + // add our labels last so they aren't overwritten + service.Labels = naming.Merge(service.Labels, map[string]string{ naming.LabelCluster: cluster.Name, naming.LabelPatroni: naming.PatroniScope(cluster), @@ -170,21 +245,39 @@ func (r *Reconciler) generatePatroniLeaderLeaseService( // Patroni will ensure that they always route to the elected leader. // - https://docs.k8s.io/concepts/services-networking/service/#services-without-selectors service.Spec.Selector = nil - if cluster.Spec.Service != nil { - service.Spec.Type = corev1.ServiceType(cluster.Spec.Service.Type) - } else { - service.Spec.Type = corev1.ServiceTypeClusterIP - } // The TargetPort must be the name (not the number) of the PostgreSQL // ContainerPort. This name allows the port number to differ between // instances, which can happen during a rolling update. 
- service.Spec.Ports = []corev1.ServicePort{{ + servicePort := corev1.ServicePort{ Name: naming.PortPostgreSQL, Port: *cluster.Spec.Port, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromString(naming.PortPostgreSQL), - }} + } + + if spec := cluster.Spec.Service; spec == nil { + service.Spec.Type = corev1.ServiceTypeClusterIP + } else { + service.Spec.Type = corev1.ServiceType(spec.Type) + if spec.NodePort != nil { + if service.Spec.Type == corev1.ServiceTypeClusterIP { + // The NodePort can only be set when the Service type is NodePort or + // LoadBalancer. However, due to a known issue prior to Kubernetes + // 1.20, we clear these errors during our apply. To preserve the + // appropriate behavior, we log an Event and return an error. + // TODO(tjmoore4): Once Validation Rules are available, this check + // and event could potentially be removed in favor of that validation + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "MisconfiguredClusterIP", + "NodePort cannot be set with type ClusterIP on Service %q", service.Name) + return nil, fmt.Errorf("NodePort cannot be set with type ClusterIP on Service %q", service.Name) + } + servicePort.NodePort = *spec.NodePort + } + service.Spec.ExternalTrafficPolicy = initialize.FromPointer(spec.ExternalTrafficPolicy) + service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy + } + service.Spec.Ports = []corev1.ServicePort{servicePort} err := errors.WithStack(r.setControllerReference(cluster, service)) return service, err @@ -209,14 +302,14 @@ func (r *Reconciler) reconcilePatroniLeaderLease( return service, err } -// +kubebuilder:rbac:groups="",resources=endpoints,verbs=get +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={get} // reconcilePatroniStatus populates cluster.Status.Patroni with observations. func (r *Reconciler) reconcilePatroniStatus( ctx context.Context, cluster *v1beta1.PostgresCluster, observedInstances *observedInstances, -) (reconcile.Result, error) { - result := reconcile.Result{} +) (time.Duration, error) { + var requeue time.Duration log := logging.FromContext(ctx) var readyInstance bool @@ -226,16 +319,14 @@ func (r *Reconciler) reconcilePatroniStatus( } } - dcs := &v1.Endpoints{ObjectMeta: naming.PatroniDistributedConfiguration(cluster)} + dcs := &corev1.Endpoints{ObjectMeta: naming.PatroniDistributedConfiguration(cluster)} err := errors.WithStack(client.IgnoreNotFound( r.Client.Get(ctx, client.ObjectKeyFromObject(dcs), dcs))) if err == nil { if dcs.Annotations["initialize"] != "" { // After bootstrap, Patroni writes the cluster system identifier to DCS. - cluster.Status.Patroni = &v1beta1.PatroniStatus{ - SystemIdentifier: dcs.Annotations["initialize"], - } + cluster.Status.Patroni.SystemIdentifier = dcs.Annotations["initialize"] } else if readyInstance { // While we typically expect a value for the initialize key to be present in the // Endpoints above by the time the StatefulSet for any instance indicates "ready" @@ -245,12 +336,11 @@ func (r *Reconciler) reconcilePatroniStatus( // is detected in the cluster we assume this is the case, and simply log a message and // requeue in order to try again until the expected value is found. 
log.Info("detected ready instance but no initialize value") - result.RequeueAfter = 1 * time.Second - return result, nil + requeue = time.Second } } - return result, err + return requeue, err } // reconcileReplicationSecret creates a secret containing the TLS @@ -260,47 +350,41 @@ func (r *Reconciler) reconcilePatroniStatus( // account and enable cert authentication for that user func (r *Reconciler) reconcileReplicationSecret( ctx context.Context, cluster *v1beta1.PostgresCluster, - rootCACert *pki.RootCertificateAuthority, -) (*v1.Secret, error) { + root *pki.RootCertificateAuthority, +) (*corev1.Secret, error) { // if a custom postgrescluster secret is provided, just return it if cluster.Spec.CustomReplicationClientTLSSecret != nil { - custom := &v1.Secret{ObjectMeta: metav1.ObjectMeta{ + custom := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ Name: cluster.Spec.CustomReplicationClientTLSSecret.Name, Namespace: cluster.Namespace, }} err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(custom), custom)) - if err == nil { - return custom, err - } - return nil, err + return custom, err } - existing := &v1.Secret{ObjectMeta: naming.ReplicationClientCertSecret(cluster)} + existing := &corev1.Secret{ObjectMeta: naming.ReplicationClientCertSecret(cluster)} err := errors.WithStack(client.IgnoreNotFound( r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) - clientLeaf := pki.NewLeafCertificate("", nil, nil) - clientLeaf.DNSNames = []string{postgres.ReplicationUser} - clientLeaf.CommonName = clientLeaf.DNSNames[0] + leaf := &pki.LeafCertificate{} + commonName := postgres.ReplicationUser + dnsNames := []string{commonName} - if data, ok := existing.Data[naming.ReplicationCert]; err == nil && ok { - clientLeaf.Certificate, err = pki.ParseCertificate(data) - err = errors.WithStack(err) - } - if data, ok := existing.Data[naming.ReplicationPrivateKey]; err == nil && ok { - clientLeaf.PrivateKey, err = pki.ParsePrivateKey(data) - err = errors.WithStack(err) - } + if err == nil { + // Unmarshal and validate the stored leaf. These first errors can + // be ignored because they result in an invalid leaf which is then + // correctly regenerated. 
+ _ = leaf.Certificate.UnmarshalText(existing.Data[naming.ReplicationCert]) + _ = leaf.PrivateKey.UnmarshalText(existing.Data[naming.ReplicationPrivateKey]) - // if there is an error or the client leaf certificate is bad, generate a new one - if err != nil || pki.LeafCertIsBad(ctx, clientLeaf, rootCACert, cluster.Namespace) { - err = errors.WithStack(clientLeaf.Generate(rootCACert)) + leaf, err = root.RegenerateLeafWhenNecessary(leaf, commonName, dnsNames) + err = errors.WithStack(err) } - intent := &v1.Secret{ObjectMeta: naming.ReplicationClientCertSecret(cluster)} - intent.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Secret")) + intent := &corev1.Secret{ObjectMeta: naming.ReplicationClientCertSecret(cluster)} + intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) intent.Data = make(map[string][]byte) // set labels and annotations @@ -317,34 +401,31 @@ func (r *Reconciler) reconcileReplicationSecret( return nil, err } if err == nil { - intent.Data[naming.ReplicationCert], err = clientLeaf.Certificate.MarshalText() + intent.Data[naming.ReplicationCert], err = leaf.Certificate.MarshalText() err = errors.WithStack(err) } if err == nil { - intent.Data[naming.ReplicationPrivateKey], err = clientLeaf.PrivateKey.MarshalText() + intent.Data[naming.ReplicationPrivateKey], err = leaf.PrivateKey.MarshalText() err = errors.WithStack(err) } if err == nil { - intent.Data[naming.ReplicationCACert], err = rootCACert.Certificate.MarshalText() + intent.Data[naming.ReplicationCACert], err = root.Certificate.MarshalText() err = errors.WithStack(err) } if err == nil { err = errors.WithStack(r.apply(ctx, intent)) } - if err == nil { - return intent, err - } - return nil, err + return intent, err } // replicationCertSecretProjection returns a secret projection of the postgrescluster's // client certificate and key to include in the instance configuration volume. -func replicationCertSecretProjection(certificate *v1.Secret) *v1.SecretProjection { - return &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{ +func replicationCertSecretProjection(certificate *corev1.Secret) *corev1.SecretProjection { + return &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ Name: certificate.Name, }, - Items: []v1.KeyToPath{ + Items: []corev1.KeyToPath{ { Key: naming.ReplicationCert, Path: naming.ReplicationCertPath, @@ -360,3 +441,164 @@ func replicationCertSecretProjection(certificate *v1.Secret) *v1.SecretProjectio }, } } + +func (r *Reconciler) reconcilePatroniSwitchover(ctx context.Context, + cluster *v1beta1.PostgresCluster, instances *observedInstances) error { + log := logging.FromContext(ctx) + + // If switchover is not enabled, clear out the Patroni switchover status fields + // which might have been set by previous switchovers. 
+ // This also gives the user a way to easily recover and try again: if the operator + // runs into a problem with a switchover, turning `cluster.Spec.Patroni.Switchover` + // to `false` will clear the fields before another attempt + if cluster.Spec.Patroni == nil || + cluster.Spec.Patroni.Switchover == nil || + !cluster.Spec.Patroni.Switchover.Enabled { + cluster.Status.Patroni.Switchover = nil + cluster.Status.Patroni.SwitchoverTimeline = nil + return nil + } + + annotation := cluster.GetAnnotations()[naming.PatroniSwitchover] + spec := cluster.Spec.Patroni.Switchover + status := cluster.Status.Patroni.Switchover + + // If the status has been updated with the trigger annotation, the requested + // switchover has been successful, and the `SwitchoverTimeline` field can be cleared + if annotation == "" || (status != nil && *status == annotation) { + cluster.Status.Patroni.SwitchoverTimeline = nil + return nil + } + + // If we've reached this point, we assume a switchover request or in progress + // and need to make sure the prerequisites are met, e.g., more than one pod, + // a running instance to issue the switchover command to, etc. + if len(instances.forCluster) <= 1 { + // TODO: event + // TODO: Possible webhook validation + return errors.New("Need more than one instance to switchover") + } + + // TODO: Add webhook validation that requires a targetInstance when requesting failover + if spec.Type == v1beta1.PatroniSwitchoverTypeFailover { + if spec.TargetInstance == nil || *spec.TargetInstance == "" { + // TODO: event + return errors.New("TargetInstance required when running failover") + } + } + + // Determine if user is specifying a target instance. Validate the + // provided instance has been observed in the cluster. + var targetInstance *Instance + if spec.TargetInstance != nil && *spec.TargetInstance != "" { + for _, instance := range instances.forCluster { + if *spec.TargetInstance == instance.Name { + targetInstance = instance + } + } + if targetInstance == nil { + // TODO: event + return errors.New("TargetInstance was specified but not found in the cluster") + } + if len(targetInstance.Pods) != 1 { + // We expect that a target instance should have one associated pod. + return errors.Errorf( + "TargetInstance should have one pod. Pods (%d)", len(targetInstance.Pods)) + } + } else { + log.V(1).Info("TargetInstance not provided") + } + + // Find a running Pod that can be used to define a PodExec function. + var runningPod *corev1.Pod + for _, instance := range instances.forCluster { + if running, known := instance.IsRunning(naming.ContainerDatabase); running && + known && len(instance.Pods) == 1 { + + runningPod = instance.Pods[0] + break + } + } + if runningPod == nil { + return errors.New("Could not find a running pod when attempting switchover.") + } + exec := func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, + command ...string) error { + return r.PodExec(ctx, runningPod.Namespace, runningPod.Name, naming.ContainerDatabase, stdin, + stdout, stderr, command...) + } + + // To ensure idempotency, the operator verifies that the timeline reported by Patroni + // matches the timeline that was present when the switchover was first requested. + // TODO(benjaminjb): consider pulling the timeline from the pod annotation; manual experiments + // have shown that the annotation on the Leader pod is up to date during a switchover, but + // missing from the Replica pods. 
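[Editor's note] The GetTimeline call that follows shells out to Patroni and extracts the leader's timeline from its cluster listing. A sketch of the parsing this implies, using the same JSON shape the tests later feed the mocked PodExec ("Role" and "TL" fields); this is illustrative, not the patroni package's actual implementation:

```go
package example

import "encoding/json"

// patroniMember mirrors only the fields used from the patronictl list JSON
// output as mocked in the tests below.
type patroniMember struct {
	Role string `json:"Role"`
	TL   int64  `json:"TL"`
}

// leaderTimeline returns the leader's timeline, or 0 when no leader is
// listed; callers treat 0 as "timeline unknown".
func leaderTimeline(raw []byte) (int64, error) {
	var members []patroniMember
	if err := json.Unmarshal(raw, &members); err != nil {
		return 0, err
	}
	for _, m := range members {
		if m.Role == "Leader" {
			return m.TL, nil
		}
	}
	return 0, nil
}
```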
+ timeline, err := patroni.Executor(exec).GetTimeline(ctx) + + if err != nil { + return err + } + + if timeline == 0 { + return errors.New("error getting and parsing current timeline") + } + + statusTimeline := cluster.Status.Patroni.SwitchoverTimeline + + // If the `SwitchoverTimeline` field is empty, this is the first reconcile after + // a switchover has been requested and we need to fill in the field with the current TL + // as reported by Patroni. + // We return from here without calling for an explicit requeue, but since we're updating + // the object, we will reconcile this again for the actual switchover/failover action. + if statusTimeline == nil || (statusTimeline != nil && *statusTimeline == 0) { + log.V(1).Info("Setting SwitchoverTimeline", "timeline", timeline) + cluster.Status.Patroni.SwitchoverTimeline = &timeline + return nil + } + + // If the `SwitchoverTimeline` field does not match the current timeline as reported by Patroni, + // then we assume a switchover has been completed, and we have reached this point because the + // cache does not yet have the updated `cluster.Status.Patroni.Switchover` field. + if statusTimeline != nil && *statusTimeline != timeline { + log.V(1).Info("SwitchoverTimeline does not match current timeline, assuming already completed switchover") + cluster.Status.Patroni.Switchover = initialize.String(annotation) + cluster.Status.Patroni.SwitchoverTimeline = nil + return nil + } + + // We have the pod executor, now we need to figure out which API call to use + // In the default case we will be using SwitchoverAndWait. This API call uses + // a Patronictl switchover to move to the target instance. + action := func(ctx context.Context, exec patroni.Executor, next string) (bool, error) { + success, err := exec.SwitchoverAndWait(ctx, next) + return success, errors.WithStack(err) + } + + if spec.Type == v1beta1.PatroniSwitchoverTypeFailover { + // When a failover has been requested we use FailoverAndWait to change the primary. + action = func(ctx context.Context, exec patroni.Executor, next string) (bool, error) { + success, err := exec.FailoverAndWait(ctx, next) + return success, errors.WithStack(err) + } + } + + // If target instance has not been provided, we will pass in an empty string to patronictl + nextPrimary := "" + if targetInstance != nil { + nextPrimary = targetInstance.Pods[0].Name + } + + success, err := action(ctx, exec, nextPrimary) + if err = errors.WithStack(err); err == nil && !success { + err = errors.New("unable to switchover") + } + + // If we've reached this point, a switchover has successfully been triggered + // and we set the status accordingly. + if err == nil { + cluster.Status.Patroni.Switchover = initialize.String(annotation) + cluster.Status.Patroni.SwitchoverTimeline = nil + } + + return err +} diff --git a/internal/controller/postgrescluster/patroni_test.go b/internal/controller/postgrescluster/patroni_test.go index 045fe6a1f3..b2a457685b 100644 --- a/internal/controller/postgrescluster/patroni_test.go +++ b/internal/controller/postgrescluster/patroni_test.go @@ -1,53 +1,44 @@ -// +build envtest +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - import ( "context" "fmt" + "io" "os" "strconv" "strings" "testing" "time" - "go.opentelemetry.io/otel" + "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestGeneratePatroniLeaderLeaseService(t *testing.T) { - env, cc, _ := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{ + Client: cc, + Recorder: new(record.FakeRecorder), + } cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "ns1" @@ -55,11 +46,11 @@ func TestGeneratePatroniLeaderLeaseService(t *testing.T) { cluster.Spec.Port = initialize.Int32(9876) alwaysExpect := func(t testing.TB, service *corev1.Service) { - assert.Assert(t, marshalMatches(service.TypeMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: pg2 @@ -74,12 +65,6 @@ ownerReferences: name: pg2 uid: "" `)) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` -- name: postgres - port: 9876 - protocol: TCP - targetPort: postgres - `)) // Always gets a ClusterIP (never None). assert.Equal(t, service.Spec.ClusterIP, "") @@ -91,9 +76,14 @@ ownerReferences: service, err := reconciler.generatePatroniLeaderLeaseService(cluster) assert.NilError(t, err) alwaysExpect(t, service) - // Defaults to ClusterIP. assert.Equal(t, service.Spec.Type, corev1.ServiceTypeClusterIP) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: postgres + port: 9876 + protocol: TCP + targetPort: postgres + `)) }) t.Run("AnnotationsLabels", func(t *testing.T) { @@ -118,6 +108,36 @@ ownerReferences: "postgres-operator.crunchydata.com/patroni": "pg2-ha", }) + // Labels not in the selector. 
+ assert.Assert(t, service.Spec.Selector == nil, + "got %v", service.Spec.Selector) + + // Add metadata to individual service + cluster.Spec.Service = &v1beta1.ServiceSpec{ + Metadata: &v1beta1.Metadata{ + Annotations: map[string]string{"c": "v3"}, + Labels: map[string]string{"d": "v4", + "postgres-operator.crunchydata.com/cluster": "wrongName"}, + }, + } + + service, err = reconciler.generatePatroniLeaderLeaseService(cluster) + assert.NilError(t, err) + + // Annotations present in the metadata. + assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + "a": "v1", + "c": "v3", + }) + + // Labels present in the metadata. + assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + "b": "v2", + "d": "v4", + "postgres-operator.crunchydata.com/cluster": "pg2", + "postgres-operator.crunchydata.com/patroni": "pg2-ha", + }) + // Labels not in the selector. assert.Assert(t, service.Spec.Selector == nil, "got %v", service.Spec.Selector) @@ -147,28 +167,75 @@ ownerReferences: assert.NilError(t, err) alwaysExpect(t, service) test.Expect(t, service) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: postgres + port: 9876 + protocol: TCP + targetPort: postgres + `)) + }) + } + + typesAndPort := []struct { + Description string + Type string + NodePort *int32 + Expect func(testing.TB, *corev1.Service, error) + }{ + {Description: "ClusterIP with Port 32000", Type: "ClusterIP", + NodePort: initialize.Int32(32000), Expect: func(t testing.TB, service *corev1.Service, err error) { + assert.ErrorContains(t, err, "NodePort cannot be set with type ClusterIP on Service \"pg2-ha\"") + assert.Assert(t, service == nil) + }}, + {Description: "NodePort with Port 32001", Type: "NodePort", + NodePort: initialize.Int32(32001), Expect: func(t testing.TB, service *corev1.Service, err error) { + assert.NilError(t, err) + alwaysExpect(t, service) + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeNodePort) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: postgres + nodePort: 32001 + port: 9876 + protocol: TCP + targetPort: postgres +`)) + }}, + {Description: "LoadBalancer with Port 32002", Type: "LoadBalancer", + NodePort: initialize.Int32(32002), Expect: func(t testing.TB, service *corev1.Service, err error) { + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeLoadBalancer) + assert.NilError(t, err) + alwaysExpect(t, service) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: postgres + nodePort: 32002 + port: 9876 + protocol: TCP + targetPort: postgres +`)) + }}, + } + + for _, test := range typesAndPort { + t.Run(test.Description, func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Service = &v1beta1.ServiceSpec{Type: test.Type, NodePort: test.NodePort} + + service, err := reconciler.generatePatroniLeaderLeaseService(cluster) + test.Expect(t, service, err) }) } } func TestReconcilePatroniLeaderLease(t *testing.T) { ctx := context.Background() - env, cc, _ := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) - - ns := &corev1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, ns)) }) + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + ns := setupNamespace(t, cc) reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} - cluster := &v1beta1.PostgresCluster{} + cluster := testCluster() cluster.Namespace = ns.Name - cluster.Name = "pg2" - 
cluster.Spec.PostgresVersion = 12 - cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{}} - assert.NilError(t, cc.Create(ctx, cluster)) t.Run("NoServiceSpec", func(t *testing.T) { @@ -202,10 +269,21 @@ func TestReconcilePatroniLeaderLease(t *testing.T) { // CRD validation looks only at the new/incoming value of fields. Confirm // that each ServiceType can change to any other ServiceType. Forbidding // certain transitions requires a validating webhook. + serviceTypeChangeClusterCounter := 0 for _, beforeType := range serviceTypes { for _, changeType := range serviceTypes { t.Run(beforeType+"To"+changeType, func(t *testing.T) { - cluster := cluster.DeepCopy() + // Creating fresh clusters for these tests + cluster := testCluster() + cluster.Namespace = ns.Name + + // Note (dsessler): Adding a number to each cluster name to make cluster/service + // names unique to work around an intermittent race condition where a service + // from a prior test has not been deleted yet when the next test runs, causing + // the test to fail due to non-matching IP addresses. + cluster.Name += "-" + strconv.Itoa(serviceTypeChangeClusterCounter) + assert.NilError(t, cc.Create(ctx, cluster)) + cluster.Spec.Service = &v1beta1.ServiceSpec{Type: beforeType} before, err := reconciler.reconcilePatroniLeaderLease(ctx, cluster) @@ -215,9 +293,20 @@ func TestReconcilePatroniLeaderLease(t *testing.T) { cluster.Spec.Service.Type = changeType after, err := reconciler.reconcilePatroniLeaderLease(ctx, cluster) - assert.NilError(t, err) + + // LoadBalancers are provisioned by a separate controller that + // updates the Service soon after creation. The API may return + // a conflict error when we race to update it, even though we + // don't send a resourceVersion in our payload. Retry. 
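[Editor's note] The test above handles a conflict by calling reconcile once more by hand. Outside of tests, the conventional way to absorb such conflicts is client-go's retry helper; a sketch assuming a bare controller-runtime client, not what the test itself does:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// updateServiceType retries the read-modify-write whenever the API server
// reports a conflict, re-reading the Service before each attempt.
func updateServiceType(ctx context.Context, c client.Client,
	key client.ObjectKey, svcType corev1.ServiceType) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		svc := &corev1.Service{}
		if err := c.Get(ctx, key, svc); err != nil {
			return err
		}
		svc.Spec.Type = svcType
		return c.Update(ctx, svc)
	})
}
```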
+ if apierrors.IsConflict(err) { + t.Log("conflict:", err) + after, err = reconciler.reconcilePatroniLeaderLease(ctx, cluster) + } + + assert.NilError(t, err, "\n%#v", errors.Unwrap(err)) assert.Equal(t, after.Spec.ClusterIP, before.Spec.ClusterIP, "expected to keep the same ClusterIP") + serviceTypeChangeClusterCounter++ }) } } @@ -229,38 +318,23 @@ func TestPatroniReplicationSecret(t *testing.T) { t.Skip("USE_EXISTING_CLUSTER: Test fails due to garbage collection") } - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) - r := &Reconciler{} - ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { - r = &Reconciler{ - Client: tClient, - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(ControllerName), - Owner: ControllerName, - } - }) - t.Cleanup(func() { teardownManager(cancel, t) }) + ctx := context.Background() + r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} // test postgrescluster values var ( clusterName = "hippocluster" - namespace = "postgres-operator-test-" + rand.String(6) clusterUID = types.UID("hippouid") ) - ns := &corev1.Namespace{} - ns.Name = namespace - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) - // create a PostgresCluster to test with postgresCluster := &v1beta1.PostgresCluster{ ObjectMeta: metav1.ObjectMeta{ Name: clusterName, - Namespace: namespace, + Namespace: setupNamespace(t, tClient).Name, UID: clusterUID, }, } @@ -311,11 +385,11 @@ func TestPatroniReplicationSecret(t *testing.T) { t.Run("check replication certificate secret projection", func(t *testing.T) { // example auto-generated secret projection - testSecretProjection := &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{ + testSecretProjection := &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ Name: naming.ReplicationClientCertSecret(postgresCluster).Name, }, - Items: []v1.KeyToPath{ + Items: []corev1.KeyToPath{ { Key: naming.ReplicationCert, Path: naming.ReplicationCertPath, @@ -348,21 +422,12 @@ func TestPatroniReplicationSecret(t *testing.T) { func TestReconcilePatroniStatus(t *testing.T) { ctx := context.Background() + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) - r := &Reconciler{} - ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { - r = &Reconciler{ - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(ControllerName), - Owner: ControllerName, - } - }) - t.Cleanup(func() { teardownManager(cancel, t) }) + ns := setupNamespace(t, tClient) + r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} - namespace := "test-reconcile-patroni-status" systemIdentifier := "6952526174828511264" createResources := func(index, readyReplicas int, writeAnnotation bool) (*v1beta1.PostgresCluster, *observedInstances) { @@ -381,13 +446,13 @@ func TestReconcilePatroniStatus(t *testing.T) { postgresCluster := &v1beta1.PostgresCluster{ ObjectMeta: metav1.ObjectMeta{ Name: clusterName, - Namespace: namespace, + Namespace: ns.Name, }, } runner := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, + Namespace: ns.Name, Name: instanceName, Labels: labels, }, 
@@ -416,9 +481,9 @@ func TestReconcilePatroniStatus(t *testing.T) { Name: instanceName, Runner: runner, } for i := 0; i < readyReplicas; i++ { - instance.Pods = append(instance.Pods, &v1.Pod{ - Status: v1.PodStatus{ - Conditions: []v1.PodCondition{{ + instance.Pods = append(instance.Pods, &corev1.Pod{ + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{{ Type: corev1.PodReady, Status: corev1.ConditionTrue, Reason: "test", @@ -433,10 +498,6 @@ func TestReconcilePatroniStatus(t *testing.T) { return postgresCluster, observedInstances } - ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) - testsCases := []struct { requeueExpected bool readyReplicas int @@ -452,14 +513,493 @@ func TestReconcilePatroniStatus(t *testing.T) { t.Run(fmt.Sprintf("%+v", tc), func(t *testing.T) { postgresCluster, observedInstances := createResources(i, tc.readyReplicas, tc.writeAnnotation) - result, err := r.reconcilePatroniStatus(ctx, postgresCluster, observedInstances) + requeue, err := r.reconcilePatroniStatus(ctx, postgresCluster, observedInstances) if tc.requeueExpected { assert.NilError(t, err) - assert.Assert(t, result.RequeueAfter == 1*time.Second) + assert.Equal(t, requeue, time.Second) } else { assert.NilError(t, err) - assert.DeepEqual(t, result, reconcile.Result{}) + assert.Equal(t, requeue, time.Duration(0)) } }) } } + +func TestReconcilePatroniSwitchover(t *testing.T) { + _, client := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + var called, failover, callError, callFails bool + var timelineCallNoLeader, timelineCall bool + r := Reconciler{ + Client: client, + PodExec: func(ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string) error { + called = true + switch { + case timelineCall: + timelineCall = false + stdout.Write([]byte(`[{"Cluster": "hippo-ha", "Member": "hippo-instance1-67mc-0", "Host": "hippo-instance1-67mc-0.hippo-pods", "Role": "Leader", "State": "running", "TL": 4}, {"Cluster": "hippo-ha", "Member": "hippo-instance1-ltcf-0", "Host": "hippo-instance1-ltcf-0.hippo-pods", "Role": "Replica", "State": "running", "TL": 4, "Lag in MB": 0}]`)) + case timelineCallNoLeader: + stdout.Write([]byte(`[{"Cluster": "hippo-ha", "Member": "hippo-instance1-ltcf-0", "Host": "hippo-instance1-ltcf-0.hippo-pods", "Role": "Replica", "State": "running", "TL": 4, "Lag in MB": 0}]`)) + case callError: + return errors.New("boom") + case callFails: + stdout.Write([]byte("bang")) + case failover: + stdout.Write([]byte("failed over")) + default: + stdout.Write([]byte("switched over")) + } + return nil + }, + } + + ctx := context.Background() + + getObserved := func() *observedInstances { + instances := []*Instance{{ + Name: "target", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{{ + Name: naming.ContainerDatabase, + State: corev1.ContainerState{ + Running: new(corev1.ContainerStateRunning), + }, + }}, + }, + }}, + Runner: &appsv1.StatefulSet{}, + }, { + Name: "other", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{{ + Name: naming.ContainerDatabase, + State: corev1.ContainerState{ + Running: new(corev1.ContainerStateRunning), + }, + }}, + }, + }}, + Runner: &appsv1.StatefulSet{}, + }} + return 
&observedInstances{forCluster: instances} + } + + t.Run("empty", func(t *testing.T) { + cluster := testCluster() + observed := newObservedInstances(cluster, nil, nil) + assert.NilError(t, r.reconcilePatroniSwitchover(ctx, cluster, observed)) + }) + + t.Run("early validation", func(t *testing.T) { + for _, test := range []struct { + desc string + enabled bool + trigger string + status string + soType string + target string + check func(*testing.T, error, *v1beta1.PostgresCluster) + }{ + { + desc: "Switchover not enabled", + enabled: false, + check: func(t *testing.T, err error, cluster *v1beta1.PostgresCluster) { + assert.NilError(t, err) + assert.Assert(t, cluster.Status.Patroni.SwitchoverTimeline == nil) + assert.Assert(t, cluster.Status.Patroni.Switchover == nil) + }, + }, + { + desc: "Switchover trigger annotation not found", + enabled: true, + check: func(t *testing.T, err error, cluster *v1beta1.PostgresCluster) { + assert.NilError(t, err) + assert.Assert(t, cluster.Status.Patroni.SwitchoverTimeline == nil) + assert.Assert(t, cluster.Status.Patroni.Switchover == nil) + }, + }, + { + desc: "Status matches trigger annotation", + enabled: true, trigger: "triggered", status: "triggered", + check: func(t *testing.T, err error, cluster *v1beta1.PostgresCluster) { + assert.NilError(t, err) + assert.Assert(t, cluster.Status.Patroni.SwitchoverTimeline == nil) + assert.Equal(t, *cluster.Status.Patroni.Switchover, "triggered") + }, + }, + { + desc: "failover requested without a target", + enabled: true, trigger: "triggered", soType: "Failover", + check: func(t *testing.T, err error, cluster *v1beta1.PostgresCluster) { + assert.Error(t, err, "TargetInstance required when running failover") + assert.Equal(t, *cluster.Status.Patroni.SwitchoverTimeline, int64(2)) + assert.Assert(t, cluster.Status.Patroni.Switchover == nil) + }, + }, + { + desc: "target instance was specified but not found", + enabled: true, trigger: "triggered", target: "bad-target", + check: func(t *testing.T, err error, cluster *v1beta1.PostgresCluster) { + assert.Error(t, err, "TargetInstance was specified but not found in the cluster") + assert.Equal(t, *cluster.Status.Patroni.SwitchoverTimeline, int64(2)) + assert.Assert(t, cluster.Status.Patroni.Switchover == nil) + }, + }, + } { + t.Run(test.desc, func(t *testing.T) { + cluster := testCluster() + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "target", + Replicas: initialize.Int32(2), + DataVolumeClaimSpec: testVolumeClaimSpec(), + }} + if test.enabled { + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: &v1beta1.PatroniSwitchover{ + Enabled: true, + }, + } + } + if test.trigger != "" { + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: test.trigger, + } + } + if test.status != "" { + cluster.Status = v1beta1.PostgresClusterStatus{ + Patroni: v1beta1.PatroniStatus{ + Switchover: initialize.String(test.status), + }, + } + } + if test.soType != "" { + cluster.Spec.Patroni.Switchover.Type = test.soType + } + if test.target != "" { + cluster.Spec.Patroni.Switchover.TargetInstance = initialize.String(test.target) + } + cluster.Status.Patroni.SwitchoverTimeline = initialize.Int64(2) + test.check(t, r.reconcilePatroniSwitchover(ctx, cluster, getObserved()), cluster) + }) + } + }) + + t.Run("validate target instance", func(t *testing.T) { + cluster := testCluster() + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: "trigger", + } + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: 
&v1beta1.PatroniSwitchover{ + Enabled: true, + TargetInstance: initialize.String("target"), + }, + } + + t.Run("has no pods", func(t *testing.T) { + instances := []*Instance{{ + Name: "target", + }, { + Name: "target2", + }} + observed := &observedInstances{forCluster: instances} + + assert.Error(t, r.reconcilePatroniSwitchover(ctx, cluster, observed), + "TargetInstance should have one pod. Pods (0)") + }) + + t.Run("not running", func(t *testing.T) { + instances := []*Instance{{ + Name: "target", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + }}, + Runner: &appsv1.StatefulSet{}, + }, {Name: "other"}} + instances[0].Pods[0].Status = corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{{ + Name: naming.ContainerDatabase, + State: corev1.ContainerState{ + Terminated: new(corev1.ContainerStateTerminated), + }, + }}, + } + observed := &observedInstances{forCluster: instances} + + assert.Error(t, r.reconcilePatroniSwitchover(ctx, cluster, observed), + "Could not find a running pod when attempting switchover.") + }) + }) + + t.Run("need replica to switch", func(t *testing.T) { + cluster := testCluster() + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: "trigger", + } + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: &v1beta1.PatroniSwitchover{ + Enabled: true, + TargetInstance: initialize.String("target"), + }, + } + + observed := &observedInstances{forCluster: []*Instance{{ + Name: "target", + }}} + assert.Error(t, r.reconcilePatroniSwitchover(ctx, cluster, observed), + "Need more than one instance to switchover") + }) + + t.Run("timeline getting call errors", func(t *testing.T) { + cluster := testCluster() + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: "trigger", + } + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: &v1beta1.PatroniSwitchover{ + Enabled: true, + }, + } + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "target", + Replicas: initialize.Int32(2), + DataVolumeClaimSpec: testVolumeClaimSpec(), + }} + timelineCall, timelineCallNoLeader = false, false + called, failover, callError, callFails = false, false, true, false + err := r.reconcilePatroniSwitchover(ctx, cluster, getObserved()) + assert.Error(t, err, "boom") + assert.Assert(t, called) + assert.Assert(t, cluster.Status.Patroni.Switchover == nil) + }) + + t.Run("timeline getting call returns no leader", func(t *testing.T) { + cluster := testCluster() + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: "trigger", + } + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: &v1beta1.PatroniSwitchover{ + Enabled: true, + }, + } + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "target", + Replicas: initialize.Int32(2), + DataVolumeClaimSpec: testVolumeClaimSpec(), + }} + timelineCall, timelineCallNoLeader = false, true + called, failover, callError, callFails = false, false, false, false + err := r.reconcilePatroniSwitchover(ctx, cluster, getObserved()) + assert.Error(t, err, "error getting and parsing current timeline") + assert.Assert(t, called) + assert.Assert(t, cluster.Status.Patroni.Switchover == nil) + }) + + t.Run("timeline set", func(t *testing.T) { + cluster := testCluster() + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: "trigger", + } + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: &v1beta1.PatroniSwitchover{ + Enabled: true, + }, + } + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: 
"target", + Replicas: initialize.Int32(2), + DataVolumeClaimSpec: testVolumeClaimSpec(), + }} + timelineCall, timelineCallNoLeader = true, false + called, failover, callError, callFails = false, false, false, false + err := r.reconcilePatroniSwitchover(ctx, cluster, getObserved()) + assert.NilError(t, err) + assert.Assert(t, called) + assert.Equal(t, *cluster.Status.Patroni.SwitchoverTimeline, int64(4)) + }) + + t.Run("timeline mismatch, timeline cleared", func(t *testing.T) { + cluster := testCluster() + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: "trigger", + } + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: &v1beta1.PatroniSwitchover{ + Enabled: true, + }, + } + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "target", + Replicas: initialize.Int32(2), + DataVolumeClaimSpec: testVolumeClaimSpec(), + }} + cluster.Status.Patroni.SwitchoverTimeline = initialize.Int64(11) + timelineCall, timelineCallNoLeader = true, false + called, failover, callError, callFails = false, false, false, false + err := r.reconcilePatroniSwitchover(ctx, cluster, getObserved()) + assert.NilError(t, err) + assert.Assert(t, called) + assert.Assert(t, cluster.Status.Patroni.SwitchoverTimeline == nil) + }) + + t.Run("timeline cleared when status is updated", func(t *testing.T) { + cluster := testCluster() + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: "trigger", + } + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: &v1beta1.PatroniSwitchover{ + Enabled: true, + }, + } + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "target", + Replicas: initialize.Int32(2), + DataVolumeClaimSpec: testVolumeClaimSpec(), + }} + cluster.Status.Patroni.SwitchoverTimeline = initialize.Int64(11) + timelineCall, timelineCallNoLeader = true, false + called, failover, callError, callFails = false, false, false, false + err := r.reconcilePatroniSwitchover(ctx, cluster, getObserved()) + assert.NilError(t, err) + assert.Assert(t, called) + assert.Assert(t, cluster.Status.Patroni.SwitchoverTimeline == nil) + }) + + t.Run("switchover call fails", func(t *testing.T) { + cluster := testCluster() + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: "trigger", + } + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: &v1beta1.PatroniSwitchover{ + Enabled: true, + }, + } + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "target", + Replicas: initialize.Int32(2), + DataVolumeClaimSpec: testVolumeClaimSpec(), + }} + cluster.Status.Patroni.SwitchoverTimeline = initialize.Int64(4) + timelineCall, timelineCallNoLeader = true, false + called, failover, callError, callFails = false, false, false, true + err := r.reconcilePatroniSwitchover(ctx, cluster, getObserved()) + assert.Error(t, err, "unable to switchover") + assert.Assert(t, called) + assert.Assert(t, cluster.Status.Patroni.Switchover == nil) + assert.Equal(t, *cluster.Status.Patroni.SwitchoverTimeline, int64(4)) + }) + + t.Run("switchover call errors", func(t *testing.T) { + cluster := testCluster() + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: "trigger", + } + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: &v1beta1.PatroniSwitchover{ + Enabled: true, + }, + } + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "target", + Replicas: initialize.Int32(2), + DataVolumeClaimSpec: testVolumeClaimSpec(), + }} + cluster.Status.Patroni.SwitchoverTimeline = initialize.Int64(4) + 
timelineCall, timelineCallNoLeader = true, false + called, failover, callError, callFails = false, false, true, false + err := r.reconcilePatroniSwitchover(ctx, cluster, getObserved()) + assert.Error(t, err, "boom") + assert.Assert(t, called) + assert.Assert(t, cluster.Status.Patroni.Switchover == nil) + }) + + t.Run("switchover called", func(t *testing.T) { + cluster := testCluster() + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: "trigger", + } + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: &v1beta1.PatroniSwitchover{ + Enabled: true, + }, + } + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "target", + Replicas: initialize.Int32(2), + DataVolumeClaimSpec: testVolumeClaimSpec(), + }} + cluster.Status.Patroni.SwitchoverTimeline = initialize.Int64(4) + timelineCall, timelineCallNoLeader = true, false + called, failover, callError, callFails = false, false, false, false + assert.NilError(t, r.reconcilePatroniSwitchover(ctx, cluster, getObserved())) + assert.Assert(t, called) + assert.Equal(t, *cluster.Status.Patroni.Switchover, "trigger") + assert.Assert(t, cluster.Status.Patroni.SwitchoverTimeline == nil) + }) + + t.Run("targeted switchover called", func(t *testing.T) { + cluster := testCluster() + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: "trigger", + } + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: &v1beta1.PatroniSwitchover{ + Enabled: true, + TargetInstance: initialize.String("target"), + }, + } + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "target", + Replicas: initialize.Int32(2), + DataVolumeClaimSpec: testVolumeClaimSpec(), + }} + cluster.Status.Patroni.SwitchoverTimeline = initialize.Int64(4) + timelineCall, timelineCallNoLeader = true, false + called, failover, callError, callFails = false, false, false, false + assert.NilError(t, r.reconcilePatroniSwitchover(ctx, cluster, getObserved())) + assert.Assert(t, called) + assert.Equal(t, *cluster.Status.Patroni.Switchover, "trigger") + assert.Assert(t, cluster.Status.Patroni.SwitchoverTimeline == nil) + }) + + t.Run("targeted failover called", func(t *testing.T) { + cluster := testCluster() + cluster.Annotations = map[string]string{ + naming.PatroniSwitchover: "trigger", + } + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + Switchover: &v1beta1.PatroniSwitchover{ + Enabled: true, + Type: "Failover", + TargetInstance: initialize.String("target"), + }, + } + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "target", + Replicas: initialize.Int32(2), + DataVolumeClaimSpec: testVolumeClaimSpec(), + }} + cluster.Status.Patroni.SwitchoverTimeline = initialize.Int64(4) + timelineCall, timelineCallNoLeader = true, false + called, failover, callError, callFails = false, true, false, false + assert.NilError(t, r.reconcilePatroniSwitchover(ctx, cluster, getObserved())) + assert.Assert(t, called) + assert.Equal(t, *cluster.Status.Patroni.Switchover, "trigger") + assert.Assert(t, cluster.Status.Patroni.SwitchoverTimeline == nil) + }) +} diff --git a/internal/controller/postgrescluster/pgadmin.go b/internal/controller/postgrescluster/pgadmin.go new file mode 100644 index 0000000000..c0a936ba1f --- /dev/null +++ b/internal/controller/postgrescluster/pgadmin.go @@ -0,0 +1,503 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package postgrescluster + +import ( + "context" + "fmt" + "io" + + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/pgadmin" + "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// reconcilePGAdmin writes the objects necessary to run a pgAdmin Pod. +func (r *Reconciler) reconcilePGAdmin( + ctx context.Context, cluster *v1beta1.PostgresCluster, +) error { + // NOTE: [Reconciler.reconcilePGAdminUsers] is called in [Reconciler.reconcilePostgresUsers]. + + // TODO(tjmoore4): Currently, the returned service is only used in tests, + // but it may be useful during upcoming feature enhancements. If not, we + // may consider removing the service return altogether and refactoring + // this function to only return errors. + _, err := r.reconcilePGAdminService(ctx, cluster) + + var configmap *corev1.ConfigMap + var dataVolume *corev1.PersistentVolumeClaim + + if err == nil { + configmap, err = r.reconcilePGAdminConfigMap(ctx, cluster) + } + if err == nil { + dataVolume, err = r.reconcilePGAdminDataVolume(ctx, cluster) + } + if err == nil { + err = r.reconcilePGAdminStatefulSet(ctx, cluster, configmap, dataVolume) + } + return err +} + +// generatePGAdminConfigMap returns a v1.ConfigMap for pgAdmin. +func (r *Reconciler) generatePGAdminConfigMap( + cluster *v1beta1.PostgresCluster) (*corev1.ConfigMap, bool, error, +) { + configmap := &corev1.ConfigMap{ObjectMeta: naming.ClusterPGAdmin(cluster)} + configmap.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + + if cluster.Spec.UserInterface == nil || cluster.Spec.UserInterface.PGAdmin == nil { + return configmap, false, nil + } + + configmap.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + cluster.Spec.UserInterface.PGAdmin.Metadata.GetAnnotationsOrNil()) + configmap.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + cluster.Spec.UserInterface.PGAdmin.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RolePGAdmin, + }) + + err := errors.WithStack(pgadmin.ConfigMap(cluster, configmap)) + if err == nil { + err = errors.WithStack(r.setControllerReference(cluster, configmap)) + } + + return configmap, true, err +} + +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={get} +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={create,delete,patch} + +// reconcilePGAdminConfigMap writes the ConfigMap for pgAdmin. +func (r *Reconciler) reconcilePGAdminConfigMap( + ctx context.Context, cluster *v1beta1.PostgresCluster, +) (*corev1.ConfigMap, error) { + configmap, specified, err := r.generatePGAdminConfigMap(cluster) + + if err == nil && !specified { + // pgAdmin is disabled; delete the ConfigMap if it exists. Check the + // client cache first using Get. 
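[Editor's note] The Get-then-delete-then-IgnoreNotFound shape used here repeats for each pgAdmin object (ConfigMap, Service, StatefulSet, data PVC) when the feature is disabled. A sketch of that shared shape; the operator's versions also verify the controller reference via deleteControlled, which is omitted here for brevity:

```go
package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteIfExists removes obj when the feature that owns it is disabled.
// Checking the client cache first avoids issuing needless DELETE calls.
func deleteIfExists(ctx context.Context, c client.Client, obj client.Object) error {
	err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj)
	if err == nil {
		err = c.Delete(ctx, obj)
	}
	// Not finding the object already satisfies the desired state.
	return client.IgnoreNotFound(err)
}
```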
+ key := client.ObjectKeyFromObject(configmap) + err := errors.WithStack(r.Client.Get(ctx, key, configmap)) + if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, configmap)) + } + return nil, client.IgnoreNotFound(err) + } + + if err == nil { + err = errors.WithStack(r.apply(ctx, configmap)) + } + return configmap, err +} + +// generatePGAdminService returns a v1.Service that exposes pgAdmin pods. +// The ServiceType comes from the cluster user interface spec. +func (r *Reconciler) generatePGAdminService( + cluster *v1beta1.PostgresCluster) (*corev1.Service, bool, error, +) { + service := &corev1.Service{ObjectMeta: naming.ClusterPGAdmin(cluster)} + service.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) + + if cluster.Spec.UserInterface == nil || cluster.Spec.UserInterface.PGAdmin == nil { + return service, false, nil + } + + service.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + cluster.Spec.UserInterface.PGAdmin.Metadata.GetAnnotationsOrNil()) + service.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + cluster.Spec.UserInterface.PGAdmin.Metadata.GetLabelsOrNil()) + + if spec := cluster.Spec.UserInterface.PGAdmin.Service; spec != nil { + service.Annotations = naming.Merge(service.Annotations, + spec.Metadata.GetAnnotationsOrNil()) + service.Labels = naming.Merge(service.Labels, + spec.Metadata.GetLabelsOrNil()) + } + + // add our labels last so they aren't overwritten + service.Labels = naming.Merge(service.Labels, + map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RolePGAdmin, + }) + + // Allocate an IP address and/or node port and let Kubernetes manage the + // Endpoints by selecting Pods with the pgAdmin role. + // - https://docs.k8s.io/concepts/services-networking/service/#defining-a-service + service.Spec.Selector = map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RolePGAdmin, + } + + // The TargetPort must be the name (not the number) of the pgAdmin + // ContainerPort. This name allows the port number to differ between Pods, + // which can happen during a rolling update. + // + // TODO(tjmoore4): A custom service port is not currently supported as this + // requires updates to the pgAdmin service configuration. + servicePort := corev1.ServicePort{ + Name: naming.PortPGAdmin, + Port: 5050, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString(naming.PortPGAdmin), + } + + if spec := cluster.Spec.UserInterface.PGAdmin.Service; spec == nil { + service.Spec.Type = corev1.ServiceTypeClusterIP + } else { + service.Spec.Type = corev1.ServiceType(spec.Type) + if spec.NodePort != nil { + if service.Spec.Type == corev1.ServiceTypeClusterIP { + // The NodePort can only be set when the Service type is NodePort or + // LoadBalancer. However, due to a known issue prior to Kubernetes + // 1.20, we clear these errors during our apply. To preserve the + // appropriate behavior, we log an Event and return an error. 
+ // TODO(tjmoore4): Once Validation Rules are available, this check + // and event could potentially be removed in favor of that validation + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "MisconfiguredClusterIP", + "NodePort cannot be set with type ClusterIP on Service %q", service.Name) + return nil, true, fmt.Errorf("NodePort cannot be set with type ClusterIP on Service %q", service.Name) + } + servicePort.NodePort = *spec.NodePort + } + service.Spec.ExternalTrafficPolicy = initialize.FromPointer(spec.ExternalTrafficPolicy) + service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy + } + service.Spec.Ports = []corev1.ServicePort{servicePort} + + err := errors.WithStack(r.setControllerReference(cluster, service)) + + return service, true, err +} + +// +kubebuilder:rbac:groups="",resources="services",verbs={get} +// +kubebuilder:rbac:groups="",resources="services",verbs={create,delete,patch} + +// reconcilePGAdminService writes the Service that resolves to pgAdmin. +func (r *Reconciler) reconcilePGAdminService( + ctx context.Context, cluster *v1beta1.PostgresCluster, +) (*corev1.Service, error) { + service, specified, err := r.generatePGAdminService(cluster) + + if err == nil && !specified { + // pgAdmin is disabled; delete the Service if it exists. Check the client + // cache first using Get. + key := client.ObjectKeyFromObject(service) + err := errors.WithStack(r.Client.Get(ctx, key, service)) + if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, service)) + } + return nil, client.IgnoreNotFound(err) + } + + if err == nil { + err = errors.WithStack(r.apply(ctx, service)) + } + return service, err +} + +// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={get} +// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={create,delete,patch} + +// reconcilePGAdminStatefulSet writes the StatefulSet that runs pgAdmin. +func (r *Reconciler) reconcilePGAdminStatefulSet( + ctx context.Context, cluster *v1beta1.PostgresCluster, + configmap *corev1.ConfigMap, dataVolume *corev1.PersistentVolumeClaim, +) error { + sts := &appsv1.StatefulSet{ObjectMeta: naming.ClusterPGAdmin(cluster)} + sts.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("StatefulSet")) + + if cluster.Spec.UserInterface == nil || cluster.Spec.UserInterface.PGAdmin == nil { + // pgAdmin is disabled; delete the Deployment if it exists. Check the + // client cache first using Get. 
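[Editor's note] The NodePort check above is the same one applied to the Patroni leader lease Service earlier in this patch. A sketch of a shared helper that captures the rule; the project keeps the check inline in each generator, so this factoring is purely illustrative:

```go
package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// validateNodePort rejects a NodePort on Services that cannot use one:
// only NodePort and LoadBalancer Services accept an explicit node port.
func validateNodePort(name string, svcType corev1.ServiceType, nodePort *int32) error {
	if nodePort != nil && svcType == corev1.ServiceTypeClusterIP {
		return fmt.Errorf("NodePort cannot be set with type ClusterIP on Service %q", name)
	}
	return nil
}
```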
+ key := client.ObjectKeyFromObject(sts) + err := errors.WithStack(r.Client.Get(ctx, key, sts)) + if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, sts)) + } + return client.IgnoreNotFound(err) + } + + sts.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + cluster.Spec.UserInterface.PGAdmin.Metadata.GetAnnotationsOrNil()) + sts.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + cluster.Spec.UserInterface.PGAdmin.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RolePGAdmin, + naming.LabelData: naming.DataPGAdmin, + }) + sts.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RolePGAdmin, + }, + } + sts.Spec.Template.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + cluster.Spec.UserInterface.PGAdmin.Metadata.GetAnnotationsOrNil()) + sts.Spec.Template.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + cluster.Spec.UserInterface.PGAdmin.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RolePGAdmin, + naming.LabelData: naming.DataPGAdmin, + }) + + // if the shutdown flag is set, set pgAdmin replicas to 0 + if cluster.Spec.Shutdown != nil && *cluster.Spec.Shutdown { + sts.Spec.Replicas = initialize.Int32(0) + } else { + sts.Spec.Replicas = cluster.Spec.UserInterface.PGAdmin.Replicas + } + + // Don't clutter the namespace with extra ControllerRevisions. + sts.Spec.RevisionHistoryLimit = initialize.Int32(0) + + // Give the Pod a stable DNS record based on its name. + // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#stable-network-id + // - https://docs.k8s.io/concepts/services-networking/dns-pod-service/#pods + sts.Spec.ServiceName = naming.ClusterPodService(cluster).Name + + // Use StatefulSet's "RollingUpdate" strategy and "Parallel" policy to roll + // out changes to pods even when not Running or not Ready. + // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#rolling-updates + // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#forced-rollback + // - https://kep.k8s.io/3541 + sts.Spec.PodManagementPolicy = appsv1.ParallelPodManagement + sts.Spec.UpdateStrategy.Type = appsv1.RollingUpdateStatefulSetStrategyType + + // Use scheduling constraints from the cluster spec. + sts.Spec.Template.Spec.Affinity = cluster.Spec.UserInterface.PGAdmin.Affinity + sts.Spec.Template.Spec.Tolerations = cluster.Spec.UserInterface.PGAdmin.Tolerations + sts.Spec.Template.Spec.PriorityClassName = + initialize.FromPointer(cluster.Spec.UserInterface.PGAdmin.PriorityClassName) + sts.Spec.Template.Spec.TopologySpreadConstraints = + cluster.Spec.UserInterface.PGAdmin.TopologySpreadConstraints + + // Restart containers any time they stop, die, are killed, etc. + // - https://docs.k8s.io/concepts/workloads/pods/pod-lifecycle/#restart-policy + sts.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways + + // pgAdmin does not make any Kubernetes API calls. Use the default + // ServiceAccount and do not mount its credentials. + sts.Spec.Template.Spec.AutomountServiceAccountToken = initialize.Bool(false) + + // Do not add environment variables describing services in this namespace. 
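[Editor's note] Setting ServiceName on the StatefulSet above is what gives the pgAdmin pod its stable DNS record. A small illustration of the resulting name, with the cluster domain suffix omitted; the helper below is not part of the patch:

```go
package example

import "fmt"

// pgAdminPodFQDN shows the stable in-cluster DNS name a StatefulSet pod gets
// from its governing service: <pod>.<service>.<namespace>.svc. The first pod
// is always "<statefulset-name>-0".
func pgAdminPodFQDN(stsName, serviceName, namespace string) string {
	return fmt.Sprintf("%s-0.%s.%s.svc", stsName, serviceName, namespace)
}
```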
+ sts.Spec.Template.Spec.EnableServiceLinks = initialize.Bool(false) + + sts.Spec.Template.Spec.SecurityContext = postgres.PodSecurityContext(cluster) + + // set the image pull secrets, if any exist + sts.Spec.Template.Spec.ImagePullSecrets = cluster.Spec.ImagePullSecrets + + // Previous versions of PGO used a StatefulSet Pod Management Policy that could leave the Pod + // in a failed state. When we see that it has the wrong policy, we will delete the StatefulSet + // and then recreate it with the correct policy, as this is not a property that can be patched. + // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by + // the StatefulSet that gets created in the next reconcile. + existing := &appsv1.StatefulSet{} + if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { + if !apierrors.IsNotFound(err) { + return err + } + } else { + if existing.Spec.PodManagementPolicy != sts.Spec.PodManagementPolicy { + // We want to delete the STS without affecting the Pods, so we set the PropagationPolicy to Orphan. + // The orphaned Pods will be claimed by the StatefulSet that will be created in the next reconcile. + uid := existing.GetUID() + version := existing.GetResourceVersion() + exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} + propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan) + + return errors.WithStack(client.IgnoreNotFound(r.Client.Delete(ctx, existing, exactly, propagate))) + } + } + + if err := errors.WithStack(r.setControllerReference(cluster, sts)); err != nil { + return err + } + + pgadmin.Pod(cluster, configmap, &sts.Spec.Template.Spec, dataVolume) + + // add nss_wrapper init container and add nss_wrapper env vars to the pgAdmin + // container + addNSSWrapper( + config.PGAdminContainerImage(cluster), + cluster.Spec.ImagePullPolicy, + &sts.Spec.Template) + + // add an emptyDir volume to the PodTemplateSpec and an associated '/tmp' + // volume mount to all containers included within that spec + addTMPEmptyDir(&sts.Spec.Template) + + return errors.WithStack(r.apply(ctx, sts)) +} + +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,patch} + +// reconcilePGAdminDataVolume writes the PersistentVolumeClaim for instance's +// pgAdmin data volume. +func (r *Reconciler) reconcilePGAdminDataVolume( + ctx context.Context, cluster *v1beta1.PostgresCluster, +) (*corev1.PersistentVolumeClaim, error) { + + labelMap := map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RolePGAdmin, + naming.LabelData: naming.DataPGAdmin, + } + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.ClusterPGAdmin(cluster)} + pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim")) + + if cluster.Spec.UserInterface == nil || cluster.Spec.UserInterface.PGAdmin == nil { + // pgAdmin is disabled; delete the PVC if it exists. Check the client + // cache first using Get. 
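[Editor's note] The addTMPEmptyDir call above is described only by its call-site comment. A sketch of what that comment implies, using standard corev1 types; the project's actual helper may differ in details (for example, init containers are omitted here):

```go
package example

import corev1 "k8s.io/api/core/v1"

// addTmpEmptyDir adds an emptyDir volume to the pod template and mounts it
// at /tmp in every container, per the call-site comment above.
func addTmpEmptyDir(template *corev1.PodTemplateSpec) {
	template.Spec.Volumes = append(template.Spec.Volumes, corev1.Volume{
		Name:         "tmp",
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	})
	for i := range template.Spec.Containers {
		template.Spec.Containers[i].VolumeMounts = append(
			template.Spec.Containers[i].VolumeMounts,
			corev1.VolumeMount{Name: "tmp", MountPath: "/tmp"})
	}
}
```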
+ key := client.ObjectKeyFromObject(pvc) + err := errors.WithStack(r.Client.Get(ctx, key, pvc)) + if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, pvc)) + } + return nil, client.IgnoreNotFound(err) + } + + pvc.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + ) + pvc.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + labelMap, + ) + pvc.Spec = cluster.Spec.UserInterface.PGAdmin.DataVolumeClaimSpec + + err := errors.WithStack(r.setControllerReference(cluster, pvc)) + + if err == nil { + err = r.handlePersistentVolumeClaimError(cluster, + errors.WithStack(r.apply(ctx, pvc))) + } + + return pvc, err +} + +// +kubebuilder:rbac:groups="",resources="pods",verbs={get} + +// reconcilePGAdminUsers creates users inside of pgAdmin. +func (r *Reconciler) reconcilePGAdminUsers( + ctx context.Context, cluster *v1beta1.PostgresCluster, + specUsers []v1beta1.PostgresUserSpec, userSecrets map[string]*corev1.Secret, +) error { + const container = naming.ContainerPGAdmin + var podExecutor pgadmin.Executor + + if cluster.Spec.UserInterface == nil || cluster.Spec.UserInterface.PGAdmin == nil { + // pgAdmin is disabled; clear its status. + // TODO(cbandy): Revisit this approach when there is more than one UI. + cluster.Status.UserInterface = nil + return nil + } + + // Find the running pgAdmin container. When there is none, return early. + + pod := &corev1.Pod{ObjectMeta: naming.ClusterPGAdmin(cluster)} + pod.Name += "-0" + + err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(pod), pod)) + if err != nil { + return client.IgnoreNotFound(err) + } + + var running bool + for _, status := range pod.Status.ContainerStatuses { + if status.Name == container { + running = status.State.Running != nil + } + } + if terminating := pod.DeletionTimestamp != nil; running && !terminating { + ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) + + podExecutor = func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + } + } + if podExecutor == nil { + return nil + } + + // Calculate a hash of the commands that should be executed in pgAdmin. + + passwords := make(map[string]string, len(userSecrets)) + for userName := range userSecrets { + passwords[userName] = string(userSecrets[userName].Data["password"]) + } + + write := func(ctx context.Context, exec pgadmin.Executor) error { + return pgadmin.WriteUsersInPGAdmin(ctx, cluster, exec, specUsers, passwords) + } + + revision, err := safeHash32(func(hasher io.Writer) error { + // Discard log messages about executing. + return write(logging.NewContext(ctx, logging.Discard()), func( + _ context.Context, stdin io.Reader, _, _ io.Writer, command ...string, + ) error { + _, err := fmt.Fprint(hasher, command) + if err == nil && stdin != nil { + _, err = io.Copy(hasher, stdin) + } + return err + }) + }) + + if err == nil && + cluster.Status.UserInterface != nil && + cluster.Status.UserInterface.PGAdmin.UsersRevision == revision { + // The necessary commands have already been run; there's nothing more to do. + + // TODO(cbandy): Give the user a way to trigger execution regardless. + // The value of an annotation could influence the hash, for example. + return nil + } + + // Run the necessary commands and record their hash in cluster.Status. + // Include the hash in any log messages. 
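[Editor's note] The revision computed above hashes the commands (and any piped stdin) that would be sent to pgAdmin so identical work can be skipped on later reconciles. A sketch of that idea using FNV-32a; the operator's safeHash32 helper plays this role, and the function below is illustrative only:

```go
package example

import (
	"fmt"
	"hash/fnv"
)

// commandRevision returns a short, stable fingerprint of the commands and
// stdin that would be executed; unchanged input yields an unchanged revision.
func commandRevision(commands [][]string, stdin []byte) string {
	hasher := fnv.New32a()
	for _, command := range commands {
		fmt.Fprint(hasher, command)
	}
	_, _ = hasher.Write(stdin)
	return fmt.Sprintf("%08x", hasher.Sum32())
}
```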
+ + if err == nil { + log := logging.FromContext(ctx).WithValues("revision", revision) + err = errors.WithStack(write(logging.NewContext(ctx, log), podExecutor)) + } + if err == nil { + if cluster.Status.UserInterface == nil { + cluster.Status.UserInterface = new(v1beta1.PostgresUserInterfaceStatus) + } + cluster.Status.UserInterface.PGAdmin.UsersRevision = revision + } + + return err +} diff --git a/internal/controller/postgrescluster/pgadmin_test.go b/internal/controller/postgrescluster/pgadmin_test.go new file mode 100644 index 0000000000..92ec6f42f1 --- /dev/null +++ b/internal/controller/postgrescluster/pgadmin_test.go @@ -0,0 +1,881 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package postgrescluster + +import ( + "context" + "io" + "strconv" + "testing" + + "github.com/pkg/errors" + "gotest.tools/v3/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestGeneratePGAdminConfigMap(t *testing.T) { + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + reconciler := &Reconciler{Client: cc} + + cluster := &v1beta1.PostgresCluster{} + cluster.Namespace = "some-ns" + cluster.Name = "pg1" + + t.Run("Unspecified", func(t *testing.T) { + for _, spec := range []*v1beta1.UserInterfaceSpec{ + nil, new(v1beta1.UserInterfaceSpec), + } { + cluster := cluster.DeepCopy() + cluster.Spec.UserInterface = spec + + configmap, specified, err := reconciler.generatePGAdminConfigMap(cluster) + assert.NilError(t, err) + assert.Assert(t, !specified) + + assert.Equal(t, configmap.Namespace, cluster.Namespace) + assert.Equal(t, configmap.Name, "pg1-pgadmin") + } + }) + + cluster.Spec.UserInterface = &v1beta1.UserInterfaceSpec{ + PGAdmin: &v1beta1.PGAdminPodSpec{}, + } + + t.Run("Data,ObjectMeta,TypeMeta", func(t *testing.T) { + cluster := cluster.DeepCopy() + + configmap, specified, err := reconciler.generatePGAdminConfigMap(cluster) + assert.NilError(t, err) + assert.Assert(t, specified) + + assert.Assert(t, cmp.MarshalMatches(configmap.TypeMeta, ` +apiVersion: v1 +kind: ConfigMap + `)) + assert.Assert(t, cmp.MarshalMatches(configmap.ObjectMeta, ` +creationTimestamp: null +labels: + postgres-operator.crunchydata.com/cluster: pg1 + postgres-operator.crunchydata.com/role: pgadmin +name: pg1-pgadmin +namespace: some-ns +ownerReferences: +- apiVersion: postgres-operator.crunchydata.com/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PostgresCluster + name: pg1 + uid: "" + `)) + + assert.Assert(t, len(configmap.Data) > 0, "expected some configuration") + }) + + t.Run("Annotations,Labels", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Metadata = &v1beta1.Metadata{ + Annotations: map[string]string{"a": "v1", "b": "v2"}, + Labels: map[string]string{"c": "v3", "d": "v4"}, + } + cluster.Spec.UserInterface.PGAdmin.Metadata = &v1beta1.Metadata{ + Annotations: 
map[string]string{"a": "v5", "e": "v6"}, + Labels: map[string]string{"c": "v7", "f": "v8"}, + } + + configmap, specified, err := reconciler.generatePGAdminConfigMap(cluster) + assert.NilError(t, err) + assert.Assert(t, specified) + + // Annotations present in the metadata. + assert.DeepEqual(t, configmap.ObjectMeta.Annotations, map[string]string{ + "a": "v5", "b": "v2", "e": "v6", + }) + + // Labels present in the metadata. + assert.DeepEqual(t, configmap.ObjectMeta.Labels, map[string]string{ + "c": "v7", "d": "v4", "f": "v8", + "postgres-operator.crunchydata.com/cluster": "pg1", + "postgres-operator.crunchydata.com/role": "pgadmin", + }) + }) +} + +func TestGeneratePGAdminService(t *testing.T) { + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + reconciler := &Reconciler{ + Client: cc, + Recorder: new(record.FakeRecorder), + } + + cluster := &v1beta1.PostgresCluster{} + cluster.Namespace = "my-ns" + cluster.Name = "my-cluster" + + t.Run("Unspecified", func(t *testing.T) { + for _, spec := range []*v1beta1.UserInterfaceSpec{ + nil, new(v1beta1.UserInterfaceSpec), + } { + cluster := cluster.DeepCopy() + cluster.Spec.UserInterface = spec + + service, specified, err := reconciler.generatePGAdminService(cluster) + assert.NilError(t, err) + assert.Assert(t, !specified) + + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` +creationTimestamp: null +name: my-cluster-pgadmin +namespace: my-ns + `)) + } + }) + + cluster.Spec.UserInterface = &v1beta1.UserInterfaceSpec{ + PGAdmin: &v1beta1.PGAdminPodSpec{}, + } + + alwaysExpect := func(t testing.TB, service *corev1.Service) { + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` +apiVersion: v1 +kind: Service + `)) + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` +creationTimestamp: null +labels: + postgres-operator.crunchydata.com/cluster: my-cluster + postgres-operator.crunchydata.com/role: pgadmin +name: my-cluster-pgadmin +namespace: my-ns +ownerReferences: +- apiVersion: postgres-operator.crunchydata.com/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PostgresCluster + name: my-cluster + uid: "" + `)) + + // Always gets a ClusterIP (never None). + assert.Equal(t, service.Spec.ClusterIP, "") + assert.DeepEqual(t, service.Spec.Selector, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "my-cluster", + "postgres-operator.crunchydata.com/role": "pgadmin", + }) + } + + t.Run("AnnotationsLabels", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Metadata = &v1beta1.Metadata{ + Annotations: map[string]string{"a": "v1"}, + Labels: map[string]string{"b": "v2"}, + } + + service, specified, err := reconciler.generatePGAdminService(cluster) + assert.NilError(t, err) + assert.Assert(t, specified) + + // Annotations present in the metadata. + assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + "a": "v1", + }) + + // Labels present in the metadata. + assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + "b": "v2", + "postgres-operator.crunchydata.com/cluster": "my-cluster", + "postgres-operator.crunchydata.com/role": "pgadmin", + }) + + // Labels not in the selector. 
+ assert.DeepEqual(t, service.Spec.Selector, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "my-cluster", + "postgres-operator.crunchydata.com/role": "pgadmin", + }) + + // Add metadata to individual service + cluster.Spec.UserInterface.PGAdmin.Service = &v1beta1.ServiceSpec{ + Metadata: &v1beta1.Metadata{ + Annotations: map[string]string{"c": "v3"}, + Labels: map[string]string{"d": "v4", + "postgres-operator.crunchydata.com/cluster": "wrongName"}, + }, + } + + service, specified, err = reconciler.generatePGAdminService(cluster) + assert.NilError(t, err) + assert.Assert(t, specified) + + // Annotations present in the metadata. + assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + "a": "v1", + "c": "v3", + }) + + // Labels present in the metadata. + assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + "b": "v2", + "d": "v4", + "postgres-operator.crunchydata.com/cluster": "my-cluster", + "postgres-operator.crunchydata.com/role": "pgadmin", + }) + + // Labels not in the selector. + assert.DeepEqual(t, service.Spec.Selector, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "my-cluster", + "postgres-operator.crunchydata.com/role": "pgadmin", + }) + }) + + t.Run("NoServiceSpec", func(t *testing.T) { + service, specified, err := reconciler.generatePGAdminService(cluster) + assert.NilError(t, err) + assert.Assert(t, specified) + alwaysExpect(t, service) + // Defaults to ClusterIP. + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeClusterIP) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: pgadmin + port: 5050 + protocol: TCP + targetPort: pgadmin +`)) + }) + + types := []struct { + Type string + Expect func(testing.TB, *corev1.Service) + }{ + {Type: "ClusterIP", Expect: func(t testing.TB, service *corev1.Service) { + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeClusterIP) + }}, + {Type: "NodePort", Expect: func(t testing.TB, service *corev1.Service) { + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeNodePort) + }}, + {Type: "LoadBalancer", Expect: func(t testing.TB, service *corev1.Service) { + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeLoadBalancer) + }}, + } + + for _, test := range types { + t.Run(test.Type, func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.UserInterface.PGAdmin.Service = &v1beta1.ServiceSpec{Type: test.Type} + + service, specified, err := reconciler.generatePGAdminService(cluster) + assert.NilError(t, err) + assert.Assert(t, specified) + alwaysExpect(t, service) + test.Expect(t, service) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: pgadmin + port: 5050 + protocol: TCP + targetPort: pgadmin +`)) + }) + } + + typesAndPort := []struct { + Description string + Type string + NodePort *int32 + Expect func(testing.TB, *corev1.Service, error) + }{ + {Description: "ClusterIP with Port 32000", Type: "ClusterIP", + NodePort: initialize.Int32(32000), Expect: func(t testing.TB, service *corev1.Service, err error) { + assert.ErrorContains(t, err, "NodePort cannot be set with type ClusterIP on Service \"my-cluster-pgadmin\"") + assert.Assert(t, service == nil) + }}, + {Description: "NodePort with Port 32001", Type: "NodePort", + NodePort: initialize.Int32(32001), Expect: func(t testing.TB, service *corev1.Service, err error) { + assert.NilError(t, err) + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeNodePort) + alwaysExpect(t, service) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: pgadmin + nodePort: 
32001 + port: 5050 + protocol: TCP + targetPort: pgadmin +`)) + }}, + {Description: "LoadBalancer with Port 32002", Type: "LoadBalancer", + NodePort: initialize.Int32(32002), Expect: func(t testing.TB, service *corev1.Service, err error) { + assert.NilError(t, err) + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeLoadBalancer) + alwaysExpect(t, service) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: pgadmin + nodePort: 32002 + port: 5050 + protocol: TCP + targetPort: pgadmin +`)) + }}, + } + + for _, test := range typesAndPort { + t.Run(test.Description, func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.UserInterface.PGAdmin.Service = + &v1beta1.ServiceSpec{Type: test.Type, NodePort: test.NodePort} + + service, specified, err := reconciler.generatePGAdminService(cluster) + test.Expect(t, service, err) + // whether or not an error is encountered, 'specified' is true because + // the service *should* exist + assert.Assert(t, specified) + + }) + } +} + +func TestReconcilePGAdminService(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + + cluster := testCluster() + cluster.Namespace = setupNamespace(t, cc).Name + assert.NilError(t, cc.Create(ctx, cluster)) + + t.Run("Unspecified", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.UserInterface = nil + + service, err := reconciler.reconcilePGAdminService(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, service == nil) + }) + + cluster.Spec.UserInterface = &v1beta1.UserInterfaceSpec{ + PGAdmin: &v1beta1.PGAdminPodSpec{}, + } + + t.Run("NoServiceSpec", func(t *testing.T) { + service, err := reconciler.reconcilePGAdminService(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, service != nil) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, service)) }) + + assert.Assert(t, service.Spec.ClusterIP != "", + "expected to be assigned a ClusterIP") + }) + + serviceTypes := []string{"ClusterIP", "NodePort", "LoadBalancer"} + + // Confirm that each ServiceType can be reconciled. + for _, serviceType := range serviceTypes { + t.Run(serviceType, func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.UserInterface.PGAdmin.Service = &v1beta1.ServiceSpec{Type: serviceType} + + service, err := reconciler.reconcilePGAdminService(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, service != nil) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, service)) }) + + assert.Assert(t, service.Spec.ClusterIP != "", + "expected to be assigned a ClusterIP") + }) + } + + // CRD validation looks only at the new/incoming value of fields. Confirm + // that each ServiceType can change to any other ServiceType. Forbidding + // certain transitions requires a validating webhook. + serviceTypeChangeClusterCounter := 0 + for _, beforeType := range serviceTypes { + for _, changeType := range serviceTypes { + t.Run(beforeType+"To"+changeType, func(t *testing.T) { + // Creating fresh clusters for these tests + clusterNamespace := cluster.Namespace + cluster := testCluster() + cluster.Namespace = clusterNamespace + + // Note (dsessler): Adding a number to each cluster name to make cluster/service + // names unique to work around an intermittent race condition where a service + // from a prior test has not been deleted yet when the next test runs, causing + // the test to fail due to non-matching IP addresses. 
+ cluster.Name += "-" + strconv.Itoa(serviceTypeChangeClusterCounter) + assert.NilError(t, cc.Create(ctx, cluster)) + + cluster.Spec.UserInterface = &v1beta1.UserInterfaceSpec{ + PGAdmin: &v1beta1.PGAdminPodSpec{}, + } + cluster.Spec.UserInterface.PGAdmin.Service = &v1beta1.ServiceSpec{Type: beforeType} + + before, err := reconciler.reconcilePGAdminService(ctx, cluster) + assert.NilError(t, err) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, before)) }) + + cluster.Spec.UserInterface.PGAdmin.Service.Type = changeType + + after, err := reconciler.reconcilePGAdminService(ctx, cluster) + + // LoadBalancers are provisioned by a separate controller that + // updates the Service soon after creation. The API may return + // a conflict error when we race to update it, even though we + // don't send a resourceVersion in our payload. Retry. + if apierrors.IsConflict(err) { + t.Log("conflict:", err) + after, err = reconciler.reconcilePGAdminService(ctx, cluster) + } + + assert.NilError(t, err, "\n%#v", errors.Unwrap(err)) + assert.Equal(t, after.Spec.ClusterIP, before.Spec.ClusterIP, + "expected to keep the same ClusterIP") + serviceTypeChangeClusterCounter++ + }) + } + } +} + +func TestReconcilePGAdminStatefulSet(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + + ns := setupNamespace(t, cc) + cluster := pgAdminTestCluster(*ns) + + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) + + configmap := &corev1.ConfigMap{} + configmap.Name = "test-cm" + + pvc := &corev1.PersistentVolumeClaim{} + pvc.Name = "test-pvc" + + t.Run("verify StatefulSet", func(t *testing.T) { + err := reconciler.reconcilePGAdminStatefulSet(ctx, cluster, configmap, pvc) + assert.NilError(t, err) + + selector, err := naming.AsSelector(metav1.LabelSelector{ + MatchLabels: map[string]string{ + naming.LabelCluster: cluster.Name, + }, + }) + assert.NilError(t, err) + + list := appsv1.StatefulSetList{} + assert.NilError(t, cc.List(ctx, &list, client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selector})) + assert.Equal(t, len(list.Items), 1) + assert.Equal(t, list.Items[0].Spec.ServiceName, "test-cluster-pods") + + template := list.Items[0].Spec.Template.DeepCopy() + + // Containers and Volumes should be populated. + assert.Assert(t, len(template.Spec.Containers) != 0) + assert.Assert(t, len(template.Spec.InitContainers) != 0) + assert.Assert(t, len(template.Spec.Volumes) != 0) + + // Ignore Containers and Volumes in the comparison below. 
+ template.Spec.Containers = nil + template.Spec.InitContainers = nil + template.Spec.Volumes = nil + + assert.Assert(t, cmp.MarshalMatches(template.ObjectMeta, ` +creationTimestamp: null +labels: + postgres-operator.crunchydata.com/cluster: test-cluster + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + `)) + + compare := ` +automountServiceAccountToken: false +containers: null +dnsPolicy: ClusterFirst +enableServiceLinks: false +restartPolicy: Always +schedulerName: default-scheduler +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +terminationGracePeriodSeconds: 30 + ` + + assert.Assert(t, cmp.MarshalMatches(template.Spec, compare)) + }) + + t.Run("verify customized deployment", func(t *testing.T) { + + customcluster := pgAdminTestCluster(*ns) + + // add pod level customizations + customcluster.Name = "custom-cluster" + + // annotation and label + customcluster.Spec.UserInterface.PGAdmin.Metadata = &v1beta1.Metadata{ + Annotations: map[string]string{ + "annotation1": "annotationvalue", + }, + Labels: map[string]string{ + "label1": "labelvalue", + }, + } + + // scheduling constraints + customcluster.Spec.UserInterface.PGAdmin.Affinity = &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{{ + MatchExpressions: []corev1.NodeSelectorRequirement{{ + Key: "key", + Operator: "Exists", + }}, + }}, + }, + }, + } + customcluster.Spec.UserInterface.PGAdmin.Tolerations = []corev1.Toleration{ + {Key: "sometoleration"}, + } + + if cluster.Spec.UserInterface.PGAdmin.PriorityClassName != nil { + customcluster.Spec.UserInterface.PGAdmin.PriorityClassName = initialize.String("testpriorityclass") + } + + customcluster.Spec.UserInterface.PGAdmin.TopologySpreadConstraints = []corev1.TopologySpreadConstraint{ + { + MaxSkew: int32(1), + TopologyKey: "fakekey", + WhenUnsatisfiable: corev1.ScheduleAnyway, + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: naming.LabelCluster, Operator: "In", Values: []string{"somename"}}, + {Key: naming.LabelData, Operator: "Exists"}, + }, + }, + }, + } + + // set an image pull secret + customcluster.Spec.ImagePullSecrets = []corev1.LocalObjectReference{{ + Name: "myImagePullSecret"}} + + assert.NilError(t, cc.Create(ctx, customcluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, customcluster)) }) + + err := reconciler.reconcilePGAdminStatefulSet(ctx, customcluster, configmap, pvc) + assert.NilError(t, err) + + selector, err := naming.AsSelector(metav1.LabelSelector{ + MatchLabels: map[string]string{ + naming.LabelCluster: customcluster.Name, + }, + }) + assert.NilError(t, err) + + list := appsv1.StatefulSetList{} + assert.NilError(t, cc.List(ctx, &list, client.InNamespace(customcluster.Namespace), + client.MatchingLabelsSelector{Selector: selector})) + assert.Equal(t, len(list.Items), 1) + assert.Equal(t, list.Items[0].Spec.ServiceName, "custom-cluster-pods") + + template := list.Items[0].Spec.Template.DeepCopy() + + // Containers and Volumes should be populated. + assert.Assert(t, len(template.Spec.Containers) != 0) + assert.Assert(t, len(template.Spec.InitContainers) != 0) + assert.Assert(t, len(template.Spec.Volumes) != 0) + + // Ignore Containers and Volumes in the comparison below. 
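// cmp.MarshalMatches, used throughout these tests, comes from the internal
// testing package and is not included in this patch. Judging from how it is
// called (an object plus an indented YAML literal), it presumably marshals the
// object and compares trimmed strings. A rough sketch under that assumption,
// built on gotest.tools and sigs.k8s.io/yaml (imports assumed: "fmt",
// "strings", gotestcmp "gotest.tools/v3/assert/cmp", "sigs.k8s.io/yaml"):
func marshalMatchesSketch(actual interface{}, expected string) gotestcmp.Comparison {
	return func() gotestcmp.Result {
		b, err := yaml.Marshal(actual)
		if err != nil {
			return gotestcmp.ResultFromError(err)
		}
		if strings.TrimSpace(string(b)) == strings.TrimSpace(expected) {
			return gotestcmp.ResultSuccess
		}
		return gotestcmp.ResultFailure(fmt.Sprintf("got:\n%s\nwant:\n%s", b, expected))
	}
}

// A comparison built this way can be handed to assert.Assert just like the
// calls above, e.g. assert.Assert(t, marshalMatchesSketch(template.Spec, compare)).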
+ template.Spec.Containers = nil + template.Spec.InitContainers = nil + template.Spec.Volumes = nil + + assert.Assert(t, cmp.MarshalMatches(template.ObjectMeta, ` +annotations: + annotation1: annotationvalue +creationTimestamp: null +labels: + label1: labelvalue + postgres-operator.crunchydata.com/cluster: custom-cluster + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + `)) + + compare := ` +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: key + operator: Exists +automountServiceAccountToken: false +containers: null +dnsPolicy: ClusterFirst +enableServiceLinks: false +imagePullSecrets: +- name: myImagePullSecret +restartPolicy: Always +schedulerName: default-scheduler +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +terminationGracePeriodSeconds: 30 +tolerations: +- key: sometoleration +topologySpreadConstraints: +- labelSelector: + matchExpressions: + - key: postgres-operator.crunchydata.com/cluster + operator: In + values: + - somename + - key: postgres-operator.crunchydata.com/data + operator: Exists + maxSkew: 1 + topologyKey: fakekey + whenUnsatisfiable: ScheduleAnyway +` + + assert.Assert(t, cmp.MarshalMatches(template.Spec, compare)) + }) +} + +func TestReconcilePGAdminDataVolume(t *testing.T) { + ctx := context.Background() + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + reconciler := &Reconciler{ + Client: tClient, + Owner: client.FieldOwner(t.Name()), + } + + ns := setupNamespace(t, tClient) + cluster := pgAdminTestCluster(*ns) + + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + + t.Run("DataVolume", func(t *testing.T) { + pvc, err := reconciler.reconcilePGAdminDataVolume(ctx, cluster) + assert.NilError(t, err) + + assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) + + assert.Equal(t, pvc.Labels[naming.LabelCluster], cluster.Name) + assert.Equal(t, pvc.Labels[naming.LabelRole], naming.RolePGAdmin) + assert.Equal(t, pvc.Labels[naming.LabelData], naming.DataPGAdmin) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + requests: + storage: 1Gi +storageClassName: storage-class-for-data +volumeMode: Filesystem + `)) + }) +} + +func TestReconcilePGAdminUsers(t *testing.T) { + ctx := context.Background() + + t.Run("Disabled", func(t *testing.T) { + r := new(Reconciler) + cluster := new(v1beta1.PostgresCluster) + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) + }) + + // pgAdmin enabled + cluster := &v1beta1.PostgresCluster{} + cluster.Namespace = "ns1" + cluster.Name = "pgc1" + cluster.Spec.Port = initialize.Int32(5432) + cluster.Spec.UserInterface = + &v1beta1.UserInterfaceSpec{PGAdmin: &v1beta1.PGAdminPodSpec{}} + + t.Run("NoPods", func(t *testing.T) { + r := new(Reconciler) + r.Client = fake.NewClientBuilder().Build() + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) + }) + + // Pod in the namespace + pod := corev1.Pod{} + pod.Namespace = cluster.Namespace + pod.Name = cluster.Name + "-pgadmin-0" + + t.Run("ContainerNotRunning", func(t *testing.T) { + pod := pod.DeepCopy() + + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = nil + + r := new(Reconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) + }) + + t.Run("PodTerminating", func(t *testing.T) { + 
pod := pod.DeepCopy() + + // Must add finalizer when adding deletion timestamp otherwise fake client will panic: + // https://github.com/kubernetes-sigs/controller-runtime/pull/2316 + pod.Finalizers = append(pod.Finalizers, "some-finalizer") + + pod.DeletionTimestamp = new(metav1.Time) + *pod.DeletionTimestamp = metav1.Now() + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + + r := new(Reconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) + }) + + t.Run("PodHealthy", func(t *testing.T) { + cluster := cluster.DeepCopy() + pod := pod.DeepCopy() + + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + + r := new(Reconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + calls := 0 + r.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + assert.Equal(t, pod, "pgc1-pgadmin-0") + assert.Equal(t, namespace, cluster.Namespace) + assert.Equal(t, container, naming.ContainerPGAdmin) + + return nil + } + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) + assert.Equal(t, calls, 1, "PodExec should be called once") + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) + assert.Equal(t, calls, 1, "PodExec should not be called again") + + // Do the thing when users change. + users := []v1beta1.PostgresUserSpec{{Name: "u1"}} + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, users, nil)) + assert.Equal(t, calls, 2, "PodExec should be called once") + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, users, nil)) + assert.Equal(t, calls, 2, "PodExec should not be called again") + + // Do the thing when passwords change. + passwords := map[string]*corev1.Secret{ + "u1": {Data: map[string][]byte{"password": []byte(`something`)}}, + } + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, users, passwords)) + assert.Equal(t, calls, 3, "PodExec should be called once") + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, users, passwords)) + assert.Equal(t, calls, 3, "PodExec should not be called again") + + passwords["u1"].Data["password"] = []byte(`rotated`) + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, users, passwords)) + assert.Equal(t, calls, 4, "PodExec should be called once") + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, users, passwords)) + assert.Equal(t, calls, 4, "PodExec should not be called again") + + t.Run("ThenDisabled", func(t *testing.T) { + // TODO(cbandy): Revisit this when there is more than one UI. 
+ cluster := cluster.DeepCopy() + cluster.Spec.UserInterface = nil + + assert.Assert(t, cluster.Status.UserInterface != nil, "expected some status") + + r := new(Reconciler) + assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, users, passwords)) + assert.Assert(t, cluster.Status.UserInterface == nil, "expected no status") + }) + }) +} + +func pgAdminTestCluster(ns corev1.Namespace) *v1beta1.PostgresCluster { + return &v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: ns.Name, + }, + Spec: v1beta1.PostgresClusterSpec{ + PostgresVersion: 13, + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + DataVolumeClaimSpec: testVolumeClaimSpec(), + }}, + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Repos: []v1beta1.PGBackRestRepo{{ + Name: "repo1", + Volume: &v1beta1.RepoPVC{ + VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + }}, + }, + }, + UserInterface: &v1beta1.UserInterfaceSpec{ + PGAdmin: &v1beta1.PGAdminPodSpec{ + Image: "test-image", + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + StorageClassName: initialize.String("storage-class-for-data"), + }, + }, + }, + }, + } +} diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 71f571b6aa..836df047fc 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1,24 +1,14 @@ -package postgrescluster - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ +package postgrescluster import ( "context" "fmt" "io" + "reflect" "regexp" "sort" "strings" @@ -27,28 +17,28 @@ import ( "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/patroni" "github.com/crunchydata/postgres-operator/internal/pgbackrest" + "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -117,13 +107,14 @@ var regexRepoIndex = regexp.MustCompile(`\d+`) // RepoResources is used to store various resources for pgBackRest repositories and // repository hosts type RepoResources struct { - cronjobs []*batchv1beta1.CronJob + hosts []*appsv1.StatefulSet + cronjobs []*batchv1.CronJob manualBackupJobs []*batchv1.Job replicaCreateBackupJobs []*batchv1.Job - hosts []*appsv1.StatefulSet - pvcs []*v1.PersistentVolumeClaim - sshConfig *v1.ConfigMap - sshSecret *v1.Secret + pvcs []*corev1.PersistentVolumeClaim + sas []*corev1.ServiceAccount + roles []*rbacv1.Role + rolebindings []*rbacv1.RoleBinding } // applyRepoHostIntent ensures the pgBackRest repository host StatefulSet is synchronized with the @@ -133,13 +124,37 @@ type RepoResources struct { // rollout of the pgBackRest repository host StatefulSet in accordance with its configured // strategy. func (r *Reconciler) applyRepoHostIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, - repoHostName string, repoResources *RepoResources) (*appsv1.StatefulSet, error) { + repoHostName string, repoResources *RepoResources, + observedInstances *observedInstances) (*appsv1.StatefulSet, error) { - repo, err := r.generateRepoHostIntent(postgresCluster, repoHostName, repoResources) + repo, err := r.generateRepoHostIntent(ctx, postgresCluster, repoHostName, repoResources, observedInstances) if err != nil { return nil, err } + // Previous versions of PGO used a StatefulSet Pod Management Policy that could leave the Pod + // in a failed state. When we see that it has the wrong policy, we will delete the StatefulSet + // and then recreate it with the correct policy, as this is not a property that can be patched. + // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by + // the StatefulSet that gets created in the next reconcile. 
+ existing := &appsv1.StatefulSet{} + if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(repo), existing)); err != nil { + if !apierrors.IsNotFound(err) { + return nil, err + } + } else { + if existing.Spec.PodManagementPolicy != repo.Spec.PodManagementPolicy { + // We want to delete the STS without affecting the Pods, so we set the PropagationPolicy to Orphan. + // The orphaned Pods will be claimed by the new StatefulSet that gets created in the next reconcile. + uid := existing.GetUID() + version := existing.GetResourceVersion() + exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} + propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan) + + return repo, errors.WithStack(r.Client.Delete(ctx, existing, exactly, propagate)) + } + } + if err := r.apply(ctx, repo); err != nil { return nil, err } @@ -147,15 +162,15 @@ func (r *Reconciler) applyRepoHostIntent(ctx context.Context, postgresCluster *v return repo, nil } -// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=create;patch +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,patch} // applyRepoVolumeIntent ensures the pgBackRest repository host deployment is synchronized with the // proper configuration according to the provided PostgresCluster custom resource. This is done by // applying the PostgresCluster controller's fully specified intent for the PersistentVolumeClaim // representing a repository. func (r *Reconciler) applyRepoVolumeIntent(ctx context.Context, - postgresCluster *v1beta1.PostgresCluster, spec *v1.PersistentVolumeClaimSpec, - repoName string, repoResources *RepoResources) (*v1.PersistentVolumeClaim, error) { + postgresCluster *v1beta1.PostgresCluster, spec corev1.PersistentVolumeClaimSpec, + repoName string, repoResources *RepoResources) (*corev1.PersistentVolumeClaim, error) { repo, err := r.generateRepoVolumeIntent(postgresCluster, spec, repoName, repoResources) if err != nil { @@ -170,47 +185,71 @@ func (r *Reconciler) applyRepoVolumeIntent(ctx context.Context, return repo, nil } +// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={list} +// +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={list} +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={list} +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={list} +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={list} +// +kubebuilder:rbac:groups="",resources="secrets",verbs={list} +// +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs={list} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="roles",verbs={list} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={list} + // getPGBackRestResources returns the existing pgBackRest resources that should utilized by the // PostgresCluster controller during reconciliation. Any items returned are verified to be owned // by the PostgresCluster controller and still applicable per the current PostgresCluster spec. -// Additionally, and resources identified that no longer correspond to any current configuration +// Additionally, any resources identified that no longer correspond to any current configuration // are deleted. 
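// getPGBackRestResources below lists every relevant GroupVersionKind as an
// unstructured list and later converts the items into typed objects. Reduced
// to a single kind, that pattern looks roughly like this (a sketch only; the
// function name is made up, and the imports match those already used in this
// file plus "k8s.io/apimachinery/pkg/labels"):
func listOwnedStatefulSetsSketch(ctx context.Context, c client.Client,
	namespace string, selector labels.Selector) ([]appsv1.StatefulSet, error) {

	// List as unstructured so one code path can handle many kinds.
	uList := &unstructured.UnstructuredList{}
	uList.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("StatefulSetList"))

	if err := c.List(ctx, uList,
		client.InNamespace(namespace),
		client.MatchingLabelsSelector{Selector: selector}); err != nil {
		return nil, err
	}

	// Convert the unstructured content back into the typed list.
	var stsList appsv1.StatefulSetList
	if err := runtime.DefaultUnstructuredConverter.
		FromUnstructured(uList.UnstructuredContent(), &stsList); err != nil {
		return nil, err
	}
	return stsList.Items, nil
}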
func (r *Reconciler) getPGBackRestResources(ctx context.Context, - postgresCluster *v1beta1.PostgresCluster) (*RepoResources, error) { + postgresCluster *v1beta1.PostgresCluster, + backupsSpecFound bool, +) (*RepoResources, error) { repoResources := &RepoResources{} gvks := []schema.GroupVersionKind{{ - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, - Kind: "ConfigMapList", + Group: appsv1.SchemeGroupVersion.Group, + Version: appsv1.SchemeGroupVersion.Version, + Kind: "StatefulSetList", + }, { + Group: batchv1.SchemeGroupVersion.Group, + Version: batchv1.SchemeGroupVersion.Version, + Kind: "CronJobList", }, { Group: batchv1.SchemeGroupVersion.Group, Version: batchv1.SchemeGroupVersion.Version, Kind: "JobList", }, { - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, + Kind: "ConfigMapList", + }, { + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, Kind: "PersistentVolumeClaimList", }, { - Group: v1.SchemeGroupVersion.Group, - Version: v1.SchemeGroupVersion.Version, + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, Kind: "SecretList", }, { - Group: appsv1.SchemeGroupVersion.Group, - Version: appsv1.SchemeGroupVersion.Version, - Kind: "StatefulSetList", + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, + Kind: "ServiceAccountList", }, { - Group: batchv1beta1.SchemeGroupVersion.Group, - Version: batchv1beta1.SchemeGroupVersion.Version, - Kind: "CronJobList", + Group: rbacv1.SchemeGroupVersion.Group, + Version: rbacv1.SchemeGroupVersion.Version, + Kind: "RoleList", + }, { + Group: rbacv1.SchemeGroupVersion.Group, + Version: rbacv1.SchemeGroupVersion.Version, + Kind: "RoleBindingList", }} selector := naming.PGBackRestSelector(postgresCluster.GetName()) for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - if err := r.Client.List(context.Background(), uList, + if err := r.Client.List(ctx, uList, client.InNamespace(postgresCluster.GetNamespace()), client.MatchingLabelsSelector{Selector: selector}); err != nil { return nil, errors.WithStack(err) @@ -219,13 +258,13 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, continue } - owned, err := r.cleanupRepoResources(ctx, postgresCluster, uList.Items) + owned, err := r.cleanupRepoResources(ctx, postgresCluster, uList.Items, backupsSpecFound) if err != nil { return nil, errors.WithStack(err) } uList.Items = owned - if err := unstructuredToRepoResources(postgresCluster, gvk.Kind, - repoResources, uList); err != nil { + if err := unstructuredToRepoResources(gvk.Kind, repoResources, + uList); err != nil { return nil, errors.WithStack(err) } @@ -240,9 +279,12 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, return repoResources, nil } -// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=delete -// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=delete -// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=delete +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={delete} +// +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs={delete} +// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={delete} +// +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={delete} +// 
+kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="roles",verbs={delete} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={delete} // cleanupRepoResources cleans up pgBackRest repository resources that should no longer be // reconciled by deleting them. This includes deleting repos (i.e. PersistentVolumeClaims) that @@ -250,7 +292,9 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, // pgBackRest repository host resources if a repository host is no longer configured. func (r *Reconciler) cleanupRepoResources(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, - ownedResources []unstructured.Unstructured) ([]unstructured.Unstructured, error) { + ownedResources []unstructured.Unstructured, + backupsSpecFound bool, +) ([]unstructured.Unstructured, error) { // stores the resources that should not be deleted ownedNoDelete := []unstructured.Unstructured{} @@ -265,20 +309,31 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, // spec switch { case hasLabel(naming.LabelPGBackRestConfig): + if !backupsSpecFound { + break + } // Simply add the things we never want to delete (e.g. the pgBackRest configuration) // to the slice and do not delete ownedNoDelete = append(ownedNoDelete, owned) delete = false case hasLabel(naming.LabelPGBackRestDedicated): - // If a dedicated repo host resource and a dedicated repo host is enabled, then - // add to the slice and do not delete. - if pgbackrest.DedicatedRepoHostEnabled(postgresCluster) { + if !backupsSpecFound { + break + } + // Any resources from before 5.1 that relate to the previously required + // SSH configuration should be deleted. + // TODO(tjmoore4): This can be removed once 5.0 is EOL. + if owned.GetName() != naming.PGBackRestSSHConfig(postgresCluster).Name && + owned.GetName() != naming.PGBackRestSSHSecret(postgresCluster).Name { + // If a dedicated repo host resource and a dedicated repo host is enabled, then + // add to the slice and do not delete. ownedNoDelete = append(ownedNoDelete, owned) delete = false } case hasLabel(naming.LabelPGBackRestRepoVolume): - // If a volume (PVC) is identified for a repo that no longer exists in the - // spec then delete it. Otherwise add it to the slice and continue. + if !backupsSpecFound { + break + } // If a volume (PVC) is identified for a repo that no longer exists in the // spec then delete it. Otherwise add it to the slice and continue. for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { @@ -291,6 +346,9 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, } } case hasLabel(naming.LabelPGBackRestBackup): + if !backupsSpecFound { + break + } // If a Job is identified for a repo that no longer exists in the spec then // delete it. Otherwise add it to the slice and continue. 
for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { @@ -300,6 +358,9 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, } } case hasLabel(naming.LabelPGBackRestCronJob): + if !backupsSpecFound { + break + } for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { if repo.Name == owned.GetLabels()[naming.LabelPGBackRestRepo] { if backupScheduleFound(repo, @@ -311,14 +372,33 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, } } case hasLabel(naming.LabelPGBackRestRestore): + if !backupsSpecFound { + break + } + + // If the restore job has the PGBackRestBackupJobCompletion annotation, it is + // used for volume snapshots and should not be deleted (volume snapshots code + // will clean it up when appropriate). + if _, ok := owned.GetAnnotations()[naming.PGBackRestBackupJobCompletion]; ok { + ownedNoDelete = append(ownedNoDelete, owned) + delete = false + } + // When a cluster is prepared for restore, the system identifier is removed from status // and the cluster is therefore no longer bootstrapped. Only once the restore Job is // complete will the cluster then be bootstrapped again, which means by the time we - // detect a restore Job here and a bootstrapped cluster, the Job can be safely removed. + // detect a restore Job here and a bootstrapped cluster, the Job and any associated + // configuration resources can be safely removed. if !patroni.ClusterBootstrapped(postgresCluster) { ownedNoDelete = append(ownedNoDelete, owned) delete = false } + case hasLabel(naming.LabelPGBackRest): + if !backupsSpecFound { + break + } + ownedNoDelete = append(ownedNoDelete, owned) + delete = false } // If nothing has specified that the resource should not be deleted, then delete @@ -354,22 +434,27 @@ func backupScheduleFound(repo v1beta1.PGBackRestRepo, backupType string) bool { // unstructuredToRepoResources converts unstructured pgBackRest repository resources (specifically // unstructured StatefulSetLists and PersistentVolumeClaimList) into their structured equivalent. -func unstructuredToRepoResources(postgresCluster *v1beta1.PostgresCluster, kind string, - repoResources *RepoResources, uList *unstructured.UnstructuredList) error { +func unstructuredToRepoResources(kind string, repoResources *RepoResources, + uList *unstructured.UnstructuredList) error { switch kind { - case "ConfigMapList": - var cmList v1.ConfigMapList + case "StatefulSetList": + var stsList appsv1.StatefulSetList if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &cmList); err != nil { + FromUnstructured(uList.UnstructuredContent(), &stsList); err != nil { return errors.WithStack(err) } - // we only care about ConfigMaps with the proper names - for i, cm := range cmList.Items { - if cm.GetName() == naming.PGBackRestSSHConfig(postgresCluster).Name { - repoResources.sshConfig = &cmList.Items[i] - break - } + for i := range stsList.Items { + repoResources.hosts = append(repoResources.hosts, &stsList.Items[i]) + } + case "CronJobList": + var cronList batchv1.CronJobList + if err := runtime.DefaultUnstructuredConverter. 
+ FromUnstructured(uList.UnstructuredContent(), &cronList); err != nil { + return errors.WithStack(err) + } + for i := range cronList.Items { + repoResources.cronjobs = append(repoResources.cronjobs, &cronList.Items[i]) } case "JobList": var jobList batchv1.JobList @@ -388,8 +473,11 @@ func unstructuredToRepoResources(postgresCluster *v1beta1.PostgresCluster, kind append(repoResources.manualBackupJobs, &jobList.Items[i]) } } + case "ConfigMapList": + // Repository host now uses mTLS for encryption, authentication, and authorization. + // Configmaps for SSHD are no longer managed here. case "PersistentVolumeClaimList": - var pvcList v1.PersistentVolumeClaimList + var pvcList corev1.PersistentVolumeClaimList if err := runtime.DefaultUnstructuredConverter. FromUnstructured(uList.UnstructuredContent(), &pvcList); err != nil { return errors.WithStack(err) @@ -398,35 +486,36 @@ func unstructuredToRepoResources(postgresCluster *v1beta1.PostgresCluster, kind repoResources.pvcs = append(repoResources.pvcs, &pvcList.Items[i]) } case "SecretList": - var secretList v1.SecretList + // Repository host now uses mTLS for encryption, authentication, and authorization. + // Secrets for SSHD are no longer managed here. + // TODO(tjmoore4): Consider adding all pgBackRest secrets to RepoResources to + // observe all pgBackRest secrets in one place. + case "ServiceAccountList": + var saList corev1.ServiceAccountList if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &secretList); err != nil { + FromUnstructured(uList.UnstructuredContent(), &saList); err != nil { return errors.WithStack(err) } - // we only care about Secret with the proper names - for i, secret := range secretList.Items { - if secret.GetName() == naming.PGBackRestSSHSecret(postgresCluster).Name { - repoResources.sshSecret = &secretList.Items[i] - break - } + for i := range saList.Items { + repoResources.sas = append(repoResources.sas, &saList.Items[i]) } - case "StatefulSetList": - var stsList appsv1.StatefulSetList + case "RoleList": + var roleList rbacv1.RoleList if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &stsList); err != nil { + FromUnstructured(uList.UnstructuredContent(), &roleList); err != nil { return errors.WithStack(err) } - for i := range stsList.Items { - repoResources.hosts = append(repoResources.hosts, &stsList.Items[i]) + for i := range roleList.Items { + repoResources.roles = append(repoResources.roles, &roleList.Items[i]) } - case "CronJobList": - var cronList batchv1beta1.CronJobList + case "RoleBindingList": + var rb rbacv1.RoleBindingList if err := runtime.DefaultUnstructuredConverter. 
- FromUnstructured(uList.UnstructuredContent(), &cronList); err != nil { + FromUnstructured(uList.UnstructuredContent(), &rb); err != nil { return errors.WithStack(err) } - for i := range cronList.Items { - repoResources.cronjobs = append(repoResources.cronjobs, &cronList.Items[i]) + for i := range rb.Items { + repoResources.rolebindings = append(repoResources.rolebindings, &rb.Items[i]) } default: return fmt.Errorf("unexpected kind %q", kind) @@ -460,8 +549,9 @@ func (r *Reconciler) setScheduledJobStatus(ctx context.Context, for _, job := range jobList.Items { // we only care about the scheduled backup Jobs created by the // associated CronJobs - sbs := v1beta1.PGBackRestScheduledBackupStatus{} if job.GetLabels()[naming.LabelPGBackRestCronJob] != "" { + sbs := v1beta1.PGBackRestScheduledBackupStatus{} + if len(job.OwnerReferences) > 0 { sbs.CronJobName = job.OwnerReferences[0].Name } @@ -487,8 +577,8 @@ func (r *Reconciler) setScheduledJobStatus(ctx context.Context, // generateRepoHostIntent creates and populates StatefulSet with the PostgresCluster's full intent // as needed to create and reconcile a pgBackRest dedicated repository host within the kubernetes // cluster. -func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresCluster, - repoHostName string, repoResources *RepoResources, +func (r *Reconciler) generateRepoHostIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, + repoHostName string, repoResources *RepoResources, observedInstances *observedInstances, ) (*appsv1.StatefulSet, error) { annotations := naming.Merge( @@ -498,7 +588,9 @@ func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresClu postgresCluster.Spec.Metadata.GetLabelsOrNil(), postgresCluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), naming.PGBackRestDedicatedLabels(postgresCluster.GetName()), - ) + map[string]string{ + naming.LabelData: naming.DataPGBackRest, + }) repo := &appsv1.StatefulSet{ TypeMeta: metav1.TypeMeta{ @@ -516,7 +608,7 @@ func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresClu MatchLabels: naming.PGBackRestDedicatedLabels(postgresCluster.GetName()), }, ServiceName: naming.ClusterPodService(postgresCluster).Name, - Template: v1.PodTemplateSpec{ + Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labels, Annotations: annotations, @@ -528,6 +620,18 @@ func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresClu if repoHost := postgresCluster.Spec.Backups.PGBackRest.RepoHost; repoHost != nil { repo.Spec.Template.Spec.Affinity = repoHost.Affinity repo.Spec.Template.Spec.Tolerations = repoHost.Tolerations + repo.Spec.Template.Spec.TopologySpreadConstraints = repoHost.TopologySpreadConstraints + repo.Spec.Template.Spec.PriorityClassName = initialize.FromPointer(repoHost.PriorityClassName) + } + + // if default pod scheduling is not explicitly disabled, add the default + // pod topology spread constraints + if !initialize.FromPointer(postgresCluster.Spec.DisableDefaultPodScheduling) { + repo.Spec.Template.Spec.TopologySpreadConstraints = append( + repo.Spec.Template.Spec.TopologySpreadConstraints, + defaultTopologySpreadConstraints( + naming.ClusterDataForPostgresAndPGBackRest(postgresCluster.Name), + )...) } // Set the image pull secrets, if any exist. 
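// defaultTopologySpreadConstraints, appended above when default pod scheduling
// is not disabled, is not part of this patch. A plausible sketch, assuming the
// defaults spread matching pods across hostnames and zones without making
// scheduling mandatory; the topology keys, maxSkew, and ScheduleAnyway policy
// are assumptions:
func defaultTopologySpreadConstraintsSketch(selector metav1.LabelSelector) []corev1.TopologySpreadConstraint {
	return []corev1.TopologySpreadConstraint{{
		MaxSkew:           1,
		TopologyKey:       "kubernetes.io/hostname",
		WhenUnsatisfiable: corev1.ScheduleAnyway,
		LabelSelector:     &selector,
	}, {
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: corev1.ScheduleAnyway,
		LabelSelector:     &selector,
	}}
}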
@@ -536,41 +640,77 @@ func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresClu // https://github.com/kubernetes/kubernetes/issues/88456 repo.Spec.Template.Spec.ImagePullSecrets = postgresCluster.Spec.ImagePullSecrets - // if the cluster is set to be shutdown, stop repohost pod - if postgresCluster.Spec.Shutdown != nil && *postgresCluster.Spec.Shutdown { + // determine if any PG Pods still exist + var instancePodExists bool + for _, instance := range observedInstances.forCluster { + if len(instance.Pods) > 0 { + instancePodExists = true + break + } + } + + // if the cluster is set to be shutdown and no instance Pods remain, stop the repohost pod + if postgresCluster.Spec.Shutdown != nil && *postgresCluster.Spec.Shutdown && + !instancePodExists { repo.Spec.Replicas = initialize.Int32(0) } else { // the cluster should not be shutdown, set this value to 1 repo.Spec.Replicas = initialize.Int32(1) } + // Use StatefulSet's "RollingUpdate" strategy and "Parallel" policy to roll + // out changes to pods even when not Running or not Ready. + // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#rolling-updates + // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#forced-rollback + // - https://kep.k8s.io/3541 + repo.Spec.PodManagementPolicy = appsv1.ParallelPodManagement + repo.Spec.UpdateStrategy.Type = appsv1.RollingUpdateStatefulSetStrategyType + + // Restart containers any time they stop, die, are killed, etc. + // - https://docs.k8s.io/concepts/workloads/pods/pod-lifecycle/#restart-policy + repo.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways + + // When ShareProcessNamespace is enabled, Kubernetes' pause process becomes + // PID 1 and reaps those processes when they complete. + // - https://github.com/kubernetes/kubernetes/commit/81d27aa23969b77f + // + // The pgBackRest TLS server must be signaled when its configuration or + // certificates change. Let containers see each other's processes. + // - https://docs.k8s.io/tasks/configure-pod-container/share-process-namespace/ + repo.Spec.Template.Spec.ShareProcessNamespace = initialize.Bool(true) + + // pgBackRest does not make any Kubernetes API calls. Use the default + // ServiceAccount and do not mount its credentials. + repo.Spec.Template.Spec.AutomountServiceAccountToken = initialize.Bool(false) + + // Do not add environment variables describing services in this namespace. 
+ repo.Spec.Template.Spec.EnableServiceLinks = initialize.Bool(false) + repo.Spec.Template.Spec.SecurityContext = postgres.PodSecurityContext(postgresCluster) - var resources v1.ResourceRequirements - if postgresCluster.Spec.Backups.PGBackRest.RepoHost != nil { - resources = postgresCluster.Spec.Backups.PGBackRest.RepoHost.Resources - } - // add ssh pod info - if err := pgbackrest.AddSSHToPod(postgresCluster, &repo.Spec.Template, true, - resources); err != nil { - return nil, errors.WithStack(err) - } - // add pgBackRest repo volumes to pod - if err := pgbackrest.AddRepoVolumesToPod(postgresCluster, &repo.Spec.Template, - getRepoPVCNames(postgresCluster, repoResources.pvcs), - naming.PGBackRestRepoContainerName); err != nil { - return nil, errors.WithStack(err) + pgbackrest.AddServerToRepoPod(ctx, postgresCluster, &repo.Spec.Template.Spec) + + if pgbackrest.RepoHostVolumeDefined(postgresCluster) { + // add the init container to make the pgBackRest repo volume log directory + pgbackrest.MakePGBackrestLogDir(&repo.Spec.Template, postgresCluster) + + // add pgBackRest repo volumes to pod + if err := pgbackrest.AddRepoVolumesToPod(postgresCluster, &repo.Spec.Template, + getRepoPVCNames(postgresCluster, repoResources.pvcs), + naming.PGBackRestRepoContainerName); err != nil { + return nil, errors.WithStack(err) + } } // add configs to pod - if err := pgbackrest.AddConfigsToPod(postgresCluster, &repo.Spec.Template, - pgbackrest.CMRepoKey, naming.PGBackRestRepoContainerName); err != nil { - return nil, errors.WithStack(err) - } + pgbackrest.AddConfigToRepoPod(postgresCluster, &repo.Spec.Template.Spec) // add nss_wrapper init container and add nss_wrapper env vars to the pgbackrest // container addNSSWrapper( - config.PGBackRestContainerImage(postgresCluster), &repo.Spec.Template) + config.PGBackRestContainerImage(postgresCluster), + postgresCluster.Spec.ImagePullPolicy, + &repo.Spec.Template) + addTMPEmptyDir(&repo.Spec.Template) // set ownership references @@ -583,8 +723,8 @@ func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresClu } func (r *Reconciler) generateRepoVolumeIntent(postgresCluster *v1beta1.PostgresCluster, - spec *v1.PersistentVolumeClaimSpec, repoName string, - repoResources *RepoResources) (*v1.PersistentVolumeClaim, error) { + spec corev1.PersistentVolumeClaimSpec, repoName string, + repoResources *RepoResources) (*corev1.PersistentVolumeClaim, error) { annotations := naming.Merge( postgresCluster.Spec.Metadata.GetAnnotationsOrNil(), @@ -610,13 +750,13 @@ func (r *Reconciler) generateRepoVolumeIntent(postgresCluster *v1beta1.PostgresC meta.Labels = labels meta.Annotations = annotations - repoVol := &v1.PersistentVolumeClaim{ + repoVol := &corev1.PersistentVolumeClaim{ TypeMeta: metav1.TypeMeta{ - APIVersion: v1.SchemeGroupVersion.String(), + APIVersion: corev1.SchemeGroupVersion.String(), Kind: "PersistentVolumeClaim", }, ObjectMeta: meta, - Spec: *spec, + Spec: spec, } // set ownership references @@ -629,45 +769,76 @@ func (r *Reconciler) generateRepoVolumeIntent(postgresCluster *v1beta1.PostgresC } // generateBackupJobSpecIntent generates a JobSpec for a pgBackRest backup job -func generateBackupJobSpecIntent(postgresCluster *v1beta1.PostgresCluster, selector, - containerName, repoName, serviceAccountName, configName string, - labels, annotations map[string]string, opts ...string) (*batchv1.JobSpec, error) { +func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, + repo v1beta1.PGBackRestRepo, serviceAccountName 
string, + labels, annotations map[string]string, opts ...string) *batchv1.JobSpec { - repoIndex := regexRepoIndex.FindString(repoName) + repoIndex := regexRepoIndex.FindString(repo.Name) cmdOpts := []string{ "--stanza=" + pgbackrest.DefaultStanzaName, "--repo=" + repoIndex, } + // If VolumeSnapshots are enabled, use archive-copy and archive-check options + if postgresCluster.Spec.Backups.Snapshots != nil && feature.Enabled(ctx, feature.VolumeSnapshots) { + cmdOpts = append(cmdOpts, "--archive-copy=y", "--archive-check=y") + } + cmdOpts = append(cmdOpts, opts...) + container := corev1.Container{ + Command: []string{"/opt/crunchy/bin/pgbackrest"}, + Env: []corev1.EnvVar{ + {Name: "COMMAND", Value: "backup"}, + {Name: "COMMAND_OPTS", Value: strings.Join(cmdOpts, " ")}, + {Name: "COMPARE_HASH", Value: "true"}, + {Name: "CONTAINER", Value: naming.PGBackRestRepoContainerName}, + {Name: "NAMESPACE", Value: postgresCluster.GetNamespace()}, + {Name: "SELECTOR", Value: naming.PGBackRestDedicatedSelector(postgresCluster.GetName()).String()}, + }, + Image: config.PGBackRestContainerImage(postgresCluster), + ImagePullPolicy: postgresCluster.Spec.ImagePullPolicy, + Name: naming.PGBackRestRepoContainerName, + SecurityContext: initialize.RestrictedSecurityContext(), + } + + if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil { + container.Resources = postgresCluster.Spec.Backups.PGBackRest.Jobs.Resources + } + jobSpec := &batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ + Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Labels: labels, Annotations: annotations}, - Spec: v1.PodSpec{ - Containers: []v1.Container{{ - Command: []string{"/opt/crunchy/bin/pgbackrest"}, - Env: []v1.EnvVar{ - {Name: "COMMAND", Value: "backup"}, - {Name: "COMMAND_OPTS", Value: strings.Join(cmdOpts, " ")}, - {Name: "COMPARE_HASH", Value: "true"}, - {Name: "CONTAINER", Value: containerName}, - {Name: "NAMESPACE", Value: postgresCluster.GetNamespace()}, - {Name: "SELECTOR", Value: selector}, - }, - Image: config.PGBackRestContainerImage(postgresCluster), - Name: naming.PGBackRestRepoContainerName, - SecurityContext: initialize.RestrictedSecurityContext(), - }}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{container}, + + // Disable environment variables for services other than the Kubernetes API. + // - https://docs.k8s.io/concepts/services-networking/connect-applications-service/#accessing-the-service + // - https://releases.k8s.io/v1.23.0/pkg/kubelet/kubelet_pods.go#L553-L563 + EnableServiceLinks: initialize.Bool(false), + // Set RestartPolicy to "Never" since we want a new Pod to be created by the Job // controller when there is a failure (instead of the container simply restarting). // This will ensure the Job always has the latest configs mounted following a // failure as needed to successfully verify config hashes and run the Job. 
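The backup Job container above receives its pgBackRest options through the COMMAND_OPTS environment variable, assembled before the container is defined. A small sketch of that assembly, with a plain boolean standing in for the VolumeSnapshots feature-gate check (standalone helper, not the operator's function):

package main

import (
	"fmt"
	"strings"
)

// backupCommandOpts mirrors the option assembly above: stanza and repo first,
// archive options only when volume snapshots are enabled, then user options.
func backupCommandOpts(stanza, repoIndex string, snapshotsEnabled bool, userOpts ...string) string {
	cmdOpts := []string{"--stanza=" + stanza, "--repo=" + repoIndex}
	if snapshotsEnabled {
		cmdOpts = append(cmdOpts, "--archive-copy=y", "--archive-check=y")
	}
	cmdOpts = append(cmdOpts, userOpts...)
	return strings.Join(cmdOpts, " ")
}

func main() {
	fmt.Println(backupCommandOpts("db", "1", true, "--type=full"))
	// --stanza=db --repo=1 --archive-copy=y --archive-check=y --type=full
}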
- RestartPolicy: v1.RestartPolicyNever, + RestartPolicy: corev1.RestartPolicyNever, + SecurityContext: initialize.PodSecurityContext(), ServiceAccountName: serviceAccountName, }, }, } + if jobs := postgresCluster.Spec.Backups.PGBackRest.Jobs; jobs != nil { + jobSpec.TTLSecondsAfterFinished = jobs.TTLSecondsAfterFinished + } + + // set the priority class name, tolerations, and affinity, if they exist + if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil { + jobSpec.Template.Spec.Tolerations = postgresCluster.Spec.Backups.PGBackRest.Jobs.Tolerations + jobSpec.Template.Spec.Affinity = postgresCluster.Spec.Backups.PGBackRest.Jobs.Affinity + jobSpec.Template.Spec.PriorityClassName = + initialize.FromPointer(postgresCluster.Spec.Backups.PGBackRest.Jobs.PriorityClassName) + } + // Set the image pull secrets, if any exist. // This is set here rather than using the service account due to the lack // of propagation to existing pods when the CRD is updated: @@ -675,18 +846,15 @@ func generateBackupJobSpecIntent(postgresCluster *v1beta1.PostgresCluster, selec jobSpec.Template.Spec.ImagePullSecrets = postgresCluster.Spec.ImagePullSecrets // add pgBackRest configs to template - if err := pgbackrest.AddConfigsToPod(postgresCluster, &jobSpec.Template, - configName, naming.PGBackRestRepoContainerName); err != nil { - return nil, errors.WithStack(err) - } + pgbackrest.AddConfigToRepoPod(postgresCluster, &jobSpec.Template.Spec) - return jobSpec, nil + return jobSpec } -// +kubebuilder:rbac:groups="",resources=configmaps,verbs=list;delete -// +kubebuilder:rbac:groups="",resources=secrets,verbs=list;delete -// +kubebuilder:rbac:groups="",resources=endpoints,verbs=get -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=list +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={delete,list} +// +kubebuilder:rbac:groups="",resources="secrets",verbs={list,delete} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={get} +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={list} // observeRestoreEnv observes the current Kubernetes environment to obtain any resources applicable // to performing pgBackRest restores (e.g. when initializing a new cluster using an existing @@ -694,11 +862,11 @@ func generateBackupJobSpecIntent(postgresCluster *v1beta1.PostgresCluster, selec // created by Patroni (i.e. DCS, leader and failover Endpoints), while then also finding any existing // restore Jobs and then updating pgBackRest restore status accordingly. 
func (r *Reconciler) observeRestoreEnv(ctx context.Context, - cluster *v1beta1.PostgresCluster) ([]v1.Endpoints, *batchv1.Job, error) { + cluster *v1beta1.PostgresCluster) ([]corev1.Endpoints, *batchv1.Job, error) { // lookup the various patroni endpoints - leaderEP, dcsEP, failoverEP := v1.Endpoints{}, v1.Endpoints{}, v1.Endpoints{} - currentEndpoints := []v1.Endpoints{} + leaderEP, dcsEP, failoverEP := corev1.Endpoints{}, corev1.Endpoints{}, corev1.Endpoints{} + currentEndpoints := []corev1.Endpoints{} if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)), &leaderEP); err != nil { if !apierrors.IsNotFound(err) { @@ -726,6 +894,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, restoreJobs := &batchv1.JobList{} if err := r.Client.List(ctx, restoreJobs, &client.ListOptions{ + Namespace: cluster.Namespace, LabelSelector: naming.PGBackRestRestoreJobSelector(cluster.GetName()), }); err != nil { return nil, nil, errors.WithStack(err) @@ -765,18 +934,17 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, Reason: "PGBackRestRestoreComplete", Message: "pgBackRest restore completed successfully", }) - // TODO: remove guard with move to controller-runtime 0.9.0 https://issue.k8s.io/99714 - if len(cluster.Status.Conditions) > 0 { - meta.RemoveStatusCondition(&cluster.Status.Conditions, - ConditionPGBackRestRestoreProgressing) - } + meta.RemoveStatusCondition(&cluster.Status.Conditions, + ConditionPGBackRestRestoreProgressing) - // cleanup any configuration created solely for the restore, e.g. if we restored across - // namespaces and had to create configuration resources locally for the source cluster - restoreConfigMaps := &v1.ConfigMapList{} + // The clone process used to create resources that were used only + // by the restore job. Clean them up if they still exist. + selector := naming.PGBackRestRestoreConfigSelector(cluster.GetName()) + restoreConfigMaps := &corev1.ConfigMapList{} if err := r.Client.List(ctx, restoreConfigMaps, &client.ListOptions{ - LabelSelector: naming.PGBackRestRestoreConfigSelector(cluster.GetName()), - }, client.InNamespace(cluster.Namespace)); err != nil { + Namespace: cluster.Namespace, + LabelSelector: selector, + }); err != nil { return nil, nil, errors.WithStack(err) } for i := range restoreConfigMaps.Items { @@ -784,10 +952,11 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, return nil, nil, errors.WithStack(err) } } - restoreSecrets := &v1.SecretList{} + restoreSecrets := &corev1.SecretList{} if err := r.Client.List(ctx, restoreSecrets, &client.ListOptions{ - LabelSelector: naming.PGBackRestRestoreConfigSelector(cluster.GetName()), - }, client.InNamespace(cluster.Namespace)); err != nil { + Namespace: cluster.Namespace, + LabelSelector: selector, + }); err != nil { return nil, nil, errors.WithStack(err) } for i := range restoreSecrets.Items { @@ -809,9 +978,9 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, return currentEndpoints, restoreJob, nil } -// +kubebuilder:rbac:groups="",resources=endpoints,verbs=delete -// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=delete -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=delete +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={delete} +// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={delete} +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={delete} // prepareForRestore is responsible for reconciling an in place restore for the PostgresCluster. 
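The List calls above now set both Namespace and LabelSelector on a single client.ListOptions value, so lookups never cross namespaces. A minimal sketch of that pattern against the controller-runtime client; the label key is illustrative, standing in for the operator's naming selectors:

package sketch

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listRestoreJobs lists Jobs only in the cluster's namespace that carry a
// restore label, mirroring the namespaced ListOptions used above.
func listRestoreJobs(ctx context.Context, c client.Client, namespace, clusterName string) (*batchv1.JobList, error) {
	jobs := &batchv1.JobList{}
	err := c.List(ctx, jobs, &client.ListOptions{
		Namespace: namespace,
		LabelSelector: labels.SelectorFromSet(labels.Set{
			"example.com/pgbackrest-restore": clusterName, // illustrative label key
		}),
	})
	return jobs, err
}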
// This includes setting a "PreparingForRestore" condition, and then removing all existing @@ -820,7 +989,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, // cluster to re-bootstrap using a restored data directory. func (r *Reconciler) prepareForRestore(ctx context.Context, cluster *v1beta1.PostgresCluster, observed *observedInstances, - currentEndpoints []v1.Endpoints, restoreJob *batchv1.Job, restoreID string) error { + currentEndpoints []corev1.Endpoints, restoreJob *batchv1.Job, restoreID string) error { setPreparingClusterCondition := func(resource string) { meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ @@ -904,10 +1073,7 @@ func (r *Reconciler) prepareForRestore(ctx context.Context, // if everything is gone, proceed with re-bootstrapping the cluster via an in-place restore if len(currentEndpoints) == 0 { - if len(cluster.Status.Conditions) > 0 { - // TODO: remove guard with move to controller-runtime 0.9.0 https://issue.k8s.io/99714 - meta.RemoveStatusCondition(&cluster.Status.Conditions, ConditionPostgresDataInitialized) - } + meta.RemoveStatusCondition(&cluster.Status.Conditions, ConditionPostgresDataInitialized) meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ ObservedGeneration: cluster.GetGeneration(), Type: ConditionPGBackRestRestoreProgressing, @@ -916,7 +1082,7 @@ func (r *Reconciler) prepareForRestore(ctx context.Context, Message: "Restoring cluster in-place", }) // the cluster is no longer bootstrapped - cluster.Status.Patroni = nil + cluster.Status.Patroni.SystemIdentifier = "" // the restore will change the contents of the database, so the pgbouncer and exporter hashes // are no longer valid cluster.Status.Proxy.PGBouncer.PostgreSQLRevision = "" @@ -935,15 +1101,16 @@ func (r *Reconciler) prepareForRestore(ctx context.Context, return nil } -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=patch +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={patch} // reconcileRestoreJob is responsible for reconciling a Job that performs a pgBackRest restore in // order to populate a PGDATA directory. func (r *Reconciler) reconcileRestoreJob(ctx context.Context, - cluster, sourceCluster *v1beta1.PostgresCluster, - pgdataVolume, pgwalVolume *v1.PersistentVolumeClaim, + cluster *v1beta1.PostgresCluster, sourceCluster *v1beta1.PostgresCluster, + pgdataVolume, pgwalVolume *corev1.PersistentVolumeClaim, + pgtablespaceVolumes []*corev1.PersistentVolumeClaim, dataSource *v1beta1.PostgresClusterDataSource, - instanceName, instanceSetName, configHash string) error { + instanceName, instanceSetName, configHash, stanzaName string) error { repoName := dataSource.RepoName options := dataSource.Options @@ -953,7 +1120,9 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, for _, opt := range options { var msg string switch { - case strings.Contains(opt, "--repo"): + // Since '--repo' can be set with or without an equals ('=') sign, we check for both + // usage patterns. + case strings.Contains(opt, "--repo=") || strings.Contains(opt, "--repo "): msg = "Option '--repo' is not allowed: please use the 'repoName' field instead." 
case strings.Contains(opt, "--stanza"): msg = "Option '--stanza' is not allowed: the operator will automatically set this " + @@ -969,7 +1138,7 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, "option " } if msg != "" { - r.Recorder.Eventf(cluster, v1.EventTypeWarning, "InvalidDataSource", msg, repoName) + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "InvalidDataSource", msg, repoName) return nil } } @@ -978,31 +1147,36 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, // combine options provided by user in the spec with those populated by the operator for a // successful restore opts := append(options, []string{ - "--stanza=" + pgbackrest.DefaultStanzaName, "--pg1-path=" + pgdata, + "--stanza=" + stanzaName, + "--pg1-path=" + pgdata, "--repo=" + regexRepoIndex.FindString(repoName)}...) - var deltaOptFound bool + + var deltaOptFound, foundTarget bool for _, opt := range opts { - if strings.Contains(opt, "--delta") { + switch { + case strings.Contains(opt, "--target"): + foundTarget = true + case strings.Contains(opt, "--delta"): deltaOptFound = true - break } } if !deltaOptFound { opts = append(opts, "--delta") } - var foundTarget, foundTargetAction bool - for _, opt := range options { - switch { - case strings.Contains(opt, "--target"): - foundTarget = true - case strings.Contains(opt, "--target-action"): - foundTargetAction = true - } - } - // typically we'll want to default the target action to promote, but we'll honor any target - // action that is explicitly set - if foundTarget && !foundTargetAction { + // Note on the pgBackRest option `--target-action` in the restore job: + // (a) `--target-action` is only allowed if `--target` and `type` are set; + // TODO(benjaminjb): ensure that `type` is set as well before accepting `target-action` + // (b) our restore job assumes the `hot_standby: on` default, which is true of Postgres >= 10; + // (c) pgBackRest passes the `--target-action` setting as `recovery-target-action` + // in PostgreSQL versions >=9.5 and as `pause_at_recovery_target` on earlier 9.x versions. + // But note, pgBackRest may assume a default action of `pause` and may not pass any setting + // - https://pgbackrest.org/command.html#command-restore/category-command/option-type + // - https://www.postgresql.org/docs/14/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + // - https://github.com/pgbackrest/pgbackrest/blob/bb03b3f41942d0b781931092a76877ad309001ef/src/command/restore/restore.c#L1623 + // - https://github.com/pgbackrest/pgbackrest/issues/1314 + // - https://github.com/pgbackrest/pgbackrest/issues/987 + if foundTarget { opts = append(opts, "--target-action=promote") } @@ -1013,29 +1187,37 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, } } + // Check to see if huge pages have been requested in the spec. If they have, include 'huge_pages = try' + // in the restore command. If they haven't, include 'huge_pages = off'. + hugePagesSetting := "off" + if postgres.HugePagesRequested(cluster) { + hugePagesSetting = "try" + } + // NOTE (andrewlecuyer): Forcing users to put each argument separately might prevent the need // to do any escaping or use eval. 
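A condensed sketch of the option handling above: one pass records whether '--target' or '--delta' was supplied, '--delta' is defaulted in, and '--target-action=promote' is appended only when a target exists (standalone helper, not the operator's code):

package main

import (
	"fmt"
	"strings"
)

// finalizeRestoreOpts applies the same defaults as the restore reconciler:
// add --delta unless present, and default --target-action=promote when a
// --target option was supplied by the user.
func finalizeRestoreOpts(opts []string) []string {
	var deltaFound, targetFound bool
	for _, opt := range opts {
		switch {
		case strings.Contains(opt, "--target"):
			targetFound = true
		case strings.Contains(opt, "--delta"):
			deltaFound = true
		}
	}
	if !deltaFound {
		opts = append(opts, "--delta")
	}
	if targetFound {
		opts = append(opts, "--target-action=promote")
	}
	return opts
}

func main() {
	fmt.Println(finalizeRestoreOpts([]string{"--stanza=db", "--target=xid=123"}))
	// [--stanza=db --target=xid=123 --delta --target-action=promote]
}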
- cmd := pgbackrest.RestoreCommand(pgdata, strings.Join(opts, " ")) + cmd := pgbackrest.RestoreCommand(pgdata, hugePagesSetting, config.FetchKeyCommand(&cluster.Spec), + pgtablespaceVolumes, strings.Join(opts, " ")) // create the volume resources required for the postgres data directory dataVolumeMount := postgres.DataVolumeMount() - dataVolume := v1.Volume{ + dataVolume := corev1.Volume{ Name: dataVolumeMount.Name, - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: pgdataVolume.GetName(), }, }, } - volumes := []v1.Volume{dataVolume} - volumeMounts := []v1.VolumeMount{dataVolumeMount} + volumes := []corev1.Volume{dataVolume} + volumeMounts := []corev1.VolumeMount{dataVolumeMount} if pgwalVolume != nil { walVolumeMount := postgres.WALVolumeMount() - walVolume := v1.Volume{ + walVolume := corev1.Volume{ Name: walVolumeMount.Name, - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: pgwalVolume.GetName(), }, }, @@ -1044,30 +1226,36 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, volumeMounts = append(volumeMounts, walVolumeMount) } + for _, pgtablespaceVolume := range pgtablespaceVolumes { + tablespaceVolumeMount := postgres.TablespaceVolumeMount( + pgtablespaceVolume.Labels[naming.LabelData]) + tablespaceVolume := corev1.Volume{ + Name: tablespaceVolumeMount.Name, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pgtablespaceVolume.GetName(), + }, + }, + } + volumes = append(volumes, tablespaceVolume) + volumeMounts = append(volumeMounts, tablespaceVolumeMount) + } + restoreJob := &batchv1.Job{} if err := r.generateRestoreJobIntent(cluster, configHash, instanceName, cmd, volumeMounts, volumes, dataSource, restoreJob); err != nil { return errors.WithStack(err) } - if pgbackrest.DedicatedRepoHostEnabled(sourceCluster) { - // add ssh configs to template - if err := pgbackrest.AddSSHToPod(sourceCluster, &restoreJob.Spec.Template, false, - dataSource.Resources, - naming.PGBackRestRestoreContainerName); err != nil { - return errors.WithStack(err) - } - } - // add pgBackRest configs to template - if err := pgbackrest.AddConfigsToPod(sourceCluster, &restoreJob.Spec.Template, - pgbackrest.CMInstanceKey, naming.PGBackRestRestoreContainerName); err != nil { - return errors.WithStack(err) - } + pgbackrest.AddConfigToRestorePod(cluster, sourceCluster, &restoreJob.Spec.Template.Spec) // add nss_wrapper init container and add nss_wrapper env vars to the pgbackrest restore // container - addNSSWrapper(config.PGBackRestContainerImage(cluster), &restoreJob.Spec.Template) + addNSSWrapper( + config.PGBackRestContainerImage(cluster), + cluster.Spec.ImagePullPolicy, + &restoreJob.Spec.Template) addTMPEmptyDir(&restoreJob.Spec.Template) @@ -1105,6 +1293,7 @@ func (r *Reconciler) generateRestoreJobIntent(cluster *v1beta1.PostgresCluster, Containers: []corev1.Container{{ Command: cmd, Image: config.PostgresContainerImage(cluster), + ImagePullPolicy: cluster.Spec.ImagePullPolicy, Name: naming.PGBackRestRestoreContainerName, VolumeMounts: volumeMounts, Env: []corev1.EnvVar{{Name: "PGHOST", Value: "/tmp"}}, @@ -1125,8 +1314,22 @@ func (r *Reconciler) generateRestoreJobIntent(cluster *v1beta1.PostgresCluster, // 
https://github.com/kubernetes/kubernetes/issues/88456 job.Spec.Template.Spec.ImagePullSecrets = cluster.Spec.ImagePullSecrets + // pgBackRest does not make any Kubernetes API calls, but it may interact + // with a cloud storage provider. Use the instance ServiceAccount for its + // possible cloud identity without mounting its Kubernetes API credentials. + // - https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity + // - https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html + job.Spec.Template.Spec.AutomountServiceAccountToken = initialize.Bool(false) + job.Spec.Template.Spec.ServiceAccountName = naming.ClusterInstanceRBAC(cluster).Name + + // Do not add environment variables describing services in this namespace. + job.Spec.Template.Spec.EnableServiceLinks = initialize.Bool(false) + job.Spec.Template.Spec.SecurityContext = postgres.PodSecurityContext(cluster) + // set the priority class name, if it exists + job.Spec.Template.Spec.PriorityClassName = initialize.FromPointer(dataSource.PriorityClassName) + job.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) if err := errors.WithStack(r.setControllerReference(cluster, job)); err != nil { return err @@ -1142,13 +1345,16 @@ func (r *Reconciler) generateRestoreJobIntent(cluster *v1beta1.PostgresCluster, // the results of any attempts to properly reconcile these resources. func (r *Reconciler) reconcilePGBackRest(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, - instances *observedInstances) (reconcile.Result, error) { + instances *observedInstances, + rootCA *pki.RootCertificateAuthority, + backupsSpecFound bool, +) (reconcile.Result, error) { // add some additional context about what component is being reconciled log := logging.FromContext(ctx).WithValues("reconciler", "pgBackRest") - // if nil, create the pgBackRest status that will be updated when reconciling various - // pgBackRest resources + // if nil, create the pgBackRest status that will be updated when + // reconciling various pgBackRest resources if postgresCluster.Status.PGBackRest == nil { postgresCluster.Status.PGBackRest = &v1beta1.PGBackRestStatus{} } @@ -1159,27 +1365,33 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, // Get all currently owned pgBackRest resources in the environment as needed for // reconciliation. This includes deleting resources that should no longer exist per the // current spec (e.g. if repos, repo hosts, etc. have been removed). 
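A sketch of the restore Job pod settings described above, written against plain corev1 types; the service account and priority class names are placeholders rather than the operator's naming:

package sketch

import corev1 "k8s.io/api/core/v1"

func boolPtr(b bool) *bool { return &b }

// hardenRestorePodSpec mirrors the settings above: keep the pod's cloud
// identity through its ServiceAccount without mounting Kubernetes API
// credentials, and skip service-link environment variables.
func hardenRestorePodSpec(spec *corev1.PodSpec, instanceSA, priorityClass string) {
	spec.AutomountServiceAccountToken = boolPtr(false) // no API token volume in the pod
	spec.ServiceAccountName = instanceSA               // still usable for cloud workload identity
	spec.EnableServiceLinks = boolPtr(false)           // no env vars describing other Services
	spec.PriorityClassName = priorityClass             // empty string keeps the default
}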
- repoResources, err := r.getPGBackRestResources(ctx, postgresCluster) + repoResources, err := r.getPGBackRestResources(ctx, postgresCluster, backupsSpecFound) if err != nil { // exit early if can't get and clean existing resources as needed to reconcile return reconcile.Result{}, errors.WithStack(err) } + // At this point, reconciliation is allowed, so if no backups spec is found + // clear the status and exit + if !backupsSpecFound { + postgresCluster.Status.PGBackRest = &v1beta1.PGBackRestStatus{} + return result, nil + } + var repoHost *appsv1.StatefulSet var repoHostName string - dedicatedEnabled := pgbackrest.DedicatedRepoHostEnabled(postgresCluster) - if dedicatedEnabled { - // reconcile the pgbackrest repository host - repoHost, err = r.reconcileDedicatedRepoHost(ctx, postgresCluster, repoResources) - if err != nil { - log.Error(err, "unable to reconcile pgBackRest repo host") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) - } - repoHostName = repoHost.GetName() - } else if len(postgresCluster.Status.Conditions) > 0 { - // TODO: remove guard above with move to controller-runtime 0.9.0 https://issue.k8s.io/99714 - // remove the dedicated repo host status if a dedicated host is not enabled - meta.RemoveStatusCondition(&postgresCluster.Status.Conditions, ConditionRepoHostReady) + // reconcile the pgbackrest repository host + repoHost, err = r.reconcileDedicatedRepoHost(ctx, postgresCluster, repoResources, instances) + if err != nil { + log.Error(err, "unable to reconcile pgBackRest repo host") + result.Requeue = true + return result, nil + } + repoHostName = repoHost.GetName() + + if err := r.reconcilePGBackRestSecret(ctx, postgresCluster, repoHost, rootCA); err != nil { + log.Error(err, "unable to reconcile pgBackRest secret") + result.Requeue = true } // calculate hashes for the external repository configurations in the spec (e.g. for Azure, @@ -1188,14 +1400,16 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, configHashes, configHash, err := pgbackrest.CalculateConfigHashes(postgresCluster) if err != nil { log.Error(err, "unable to calculate config hashes") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true + return result, nil } // reconcile all pgbackrest repository repos replicaCreateRepo, err := r.reconcileRepos(ctx, postgresCluster, configHashes, repoResources) if err != nil { log.Error(err, "unable to reconcile pgBackRest repo host") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true + return result, nil } // gather instance names and reconcile all pgbackrest configuration and secrets @@ -1205,18 +1419,19 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, } // sort to ensure consistent ordering of hosts when creating pgBackRest configs sort.Strings(instanceNames) - if err := r.reconcilePGBackRestConfig(ctx, postgresCluster, nil, repoHostName, + if err := r.reconcilePGBackRestConfig(ctx, postgresCluster, repoHostName, configHash, naming.ClusterPodService(postgresCluster).Name, - postgresCluster.GetNamespace(), instanceNames, repoResources.sshSecret); err != nil { + postgresCluster.GetNamespace(), instanceNames); err != nil { log.Error(err, "unable to reconcile pgBackRest configuration") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true } // reconcile the RBAC required to run pgBackRest Jobs (e.g. 
for backups) sa, err := r.reconcilePGBackRestRBAC(ctx, postgresCluster) if err != nil { log.Error(err, "unable to create replica creation backup") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true + return result, nil } // reconcile the pgBackRest stanza for all configuration pgBackRest repos @@ -1234,17 +1449,17 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, // custom configuration and ensure stanzas are still created). if err != nil { log.Error(err, "unable to create stanza") - result = updateReconcileResult(result, reconcile.Result{RequeueAfter: 10 * time.Second}) + result.RequeueAfter = 10 * time.Second } // If a config hash mismatch, then log an info message and requeue to try again. Add some time // to the requeue to give the pgBackRest configuration changes a chance to propagate to the // container. if configHashMismatch { log.Info("pgBackRest config hash mismatch detected, requeuing to reattempt stanza create") - result = updateReconcileResult(result, reconcile.Result{RequeueAfter: 10 * time.Second}) + result.RequeueAfter = 10 * time.Second } // reconcile the pgBackRest backup CronJobs - requeue := r.reconcileScheduledBackups(ctx, postgresCluster, sa) + requeue := r.reconcileScheduledBackups(ctx, postgresCluster, sa, repoResources.cronjobs) // If the pgBackRest backup CronJob reconciliation function has encountered an error, requeue // after 10 seconds. The error will not bubble up to allow the reconcile loop to continue. // An error is not logged because an event was already created. @@ -1252,7 +1467,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, // A potential option to handle this proactively would be to use a webhook: // https://book.kubebuilder.io/cronjob-tutorial/webhook-implementation.html if requeue { - result = updateReconcileResult(result, reconcile.Result{RequeueAfter: 10 * time.Second}) + result.RequeueAfter = 10 * time.Second } // Reconcile the initial backup that is needed to enable replica creation using pgBackRest. @@ -1260,7 +1475,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, if err := r.reconcileReplicaCreateBackup(ctx, postgresCluster, instances, repoResources.replicaCreateBackupJobs, sa, configHash, replicaCreateRepo); err != nil { log.Error(err, "unable to reconcile replica creation backup") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true } // Reconcile a manual backup as defined in the spec, and triggered by the end-user via @@ -1268,21 +1483,24 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, if err := r.reconcileManualBackup(ctx, postgresCluster, repoResources.manualBackupJobs, sa, instances); err != nil { log.Error(err, "unable to reconcile manual backup") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true } return result, nil } -// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=create;patch -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=create;patch;delete +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,patch} +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={create,patch,delete} // reconcilePostgresClusterDataSource is responsible for reconciling a PostgresCluster data source. // This is specifically done by running a pgBackRest restore to populate a PostgreSQL data volume // for the PostgresCluster being reconciled using the backups of another PostgresCluster. 
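Throughout reconcilePGBackRest above, partial results are no longer merged with updateReconcileResult; the code mutates one reconcile.Result and returns early where needed. A minimal sketch of that style, with hypothetical step functions standing in for the individual reconcile calls:

package sketch

import (
	"context"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// reconcileSteps runs hypothetical steps in order, flipping fields on a single
// shared result the way the reconciler above does; errors are assumed to have
// been logged or recorded by the step itself rather than returned.
func reconcileSteps(ctx context.Context, steps ...func(context.Context) error) (reconcile.Result, error) {
	result := reconcile.Result{}
	for _, step := range steps {
		if err := step(ctx); err != nil {
			result.RequeueAfter = 10 * time.Second // retry shortly
			return result, nil
		}
	}
	return result, nil
}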
func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, cluster *v1beta1.PostgresCluster, dataSource *v1beta1.PostgresClusterDataSource, - configHash string, clusterVolumes []corev1.PersistentVolumeClaim) error { + configHash string, clusterVolumes []corev1.PersistentVolumeClaim, + rootCA *pki.RootCertificateAuthority, + backupsSpecFound bool, +) error { // grab cluster, namespaces and repo name information from the data source sourceClusterName := dataSource.ClusterName @@ -1299,7 +1517,7 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, // repo name is required by the api, so RepoName should be populated sourceRepoName := dataSource.RepoName - // Ensure we proper instance and instance set can be identified via the status. The + // Ensure the proper instance and instance set can be identified via the status. The // StartupInstance and StartupInstanceSet values should be populated when the cluster // is being prepared for a restore, and should therefore always exist at this point. // Therefore, if either are not found it is treated as an error. @@ -1355,18 +1573,16 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, // data directory for a brand new PostgresCluster using existing backups for that cluster). // If the source cluster is not the same as the current cluster, then look it up. sourceCluster := &v1beta1.PostgresCluster{} - var sourceClusterInstance string if sourceClusterName == cluster.GetName() && sourceClusterNamespace == cluster.GetNamespace() { sourceCluster = cluster.DeepCopy() - sourceClusterInstance = instanceName - instance := &Instance{Name: sourceClusterInstance} + instance := &Instance{Name: instanceName} // Reconciling pgBackRest here will ensure a pgBackRest instance config file exists (since // the cluster hasn't bootstrapped yet, and pgBackRest configs therefore have not yet been // reconciled) as needed to properly configure the pgBackRest restore Job. // Note that function reconcilePGBackRest only uses forCluster in observedInstances. result, err := r.reconcilePGBackRest(ctx, cluster, &observedInstances{ forCluster: []*Instance{instance}, - }) + }, rootCA, backupsSpecFound) if err != nil || result != (reconcile.Result{}) { return fmt.Errorf("unable to reconcile pgBackRest as needed to initialize "+ "PostgreSQL data for the cluster: %w", err) @@ -1376,21 +1592,17 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, client.ObjectKey{Name: sourceClusterName, Namespace: sourceClusterNamespace}, sourceCluster); err != nil { if apierrors.IsNotFound(err) { - r.Recorder.Eventf(cluster, v1.EventTypeWarning, "InvalidDataSource", + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "InvalidDataSource", "PostgresCluster %q does not exist", sourceClusterName) return nil } return errors.WithStack(err) } - // If restoring across namespaces, then any SSH secrets must be copied and recreated in the - // current cluster's local namespace, and the proper SSH and pgBackRest configuration for - // the source cluster must also be generated in the current cluster's namespace - if cluster.GetNamespace() != sourceCluster.GetNamespace() { - if err := r.copyRestoreConfiguration(ctx, cluster, sourceCluster, - sourceClusterInstance); err != nil { - return errors.WithStack(err) - } + // Copy repository definitions and credentials from the source cluster. + // A copy is the only way to get this information across namespaces. 
+ if err := r.copyRestoreConfiguration(ctx, cluster, sourceCluster); err != nil { + return err } } @@ -1403,7 +1615,7 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, } } if !foundRepo { - r.Recorder.Eventf(cluster, v1.EventTypeWarning, "InvalidDataSource", + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "InvalidDataSource", "PostgresCluster %q does not have a repo named %q defined", sourceClusterName, sourceRepoName) return nil @@ -1416,7 +1628,105 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, Namespace: cluster.GetNamespace(), }} // Reconcile the PGDATA and WAL volumes for the restore - pgdata, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes) + pgdata, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes, sourceCluster) + if err != nil { + return errors.WithStack(err) + } + pgwal, err := r.reconcilePostgresWALVolume(ctx, cluster, instanceSet, fakeSTS, nil, clusterVolumes) + if err != nil { + return errors.WithStack(err) + } + + pgtablespaces, err := r.reconcileTablespaceVolumes(ctx, cluster, instanceSet, fakeSTS, clusterVolumes) + if err != nil { + return errors.WithStack(err) + } + + // TODO(snapshots): If pgdata is being sourced by a VolumeSnapshot then don't perform a typical restore job; + // we only want to replay the WAL. + + // reconcile the pgBackRest restore Job to populate the cluster's data directory + if err := r.reconcileRestoreJob(ctx, cluster, sourceCluster, pgdata, pgwal, pgtablespaces, + dataSource, instanceName, instanceSetName, configHash, pgbackrest.DefaultStanzaName); err != nil { + return errors.WithStack(err) + } + + return nil +} + +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,patch} +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={create,patch,delete} + +// reconcileCloudBasedDataSource is responsible for reconciling a cloud-based PostgresCluster +// data source, i.e., S3, etc. +func (r *Reconciler) reconcileCloudBasedDataSource(ctx context.Context, + cluster *v1beta1.PostgresCluster, dataSource *v1beta1.PGBackRestDataSource, + configHash string, clusterVolumes []corev1.PersistentVolumeClaim) error { + + // Ensure the proper instance and instance set can be identified via the status. The + // StartupInstance and StartupInstanceSet values should be populated when the cluster + // is being prepared for a restore, and should therefore always exist at this point. + // Therefore, if either are not found it is treated as an error. + instanceName := cluster.Status.StartupInstance + if instanceName == "" { + return errors.WithStack( + errors.New("unable to find instance name for pgBackRest restore Job")) + } + instanceSetName := cluster.Status.StartupInstanceSet + if instanceSetName == "" { + return errors.WithStack( + errors.New("unable to find instance set name for pgBackRest restore Job")) + } + + // Ensure an instance set can be found in the current spec that corresponds to the + // instanceSetName. A valid instance spec is needed to reconcile and cluster volumes + // below (e.g. the PGDATA and/or WAL volumes). 
+ var instanceSet *v1beta1.PostgresInstanceSetSpec + for i, set := range cluster.Spec.InstanceSets { + if set.Name == instanceSetName { + instanceSet = &cluster.Spec.InstanceSets[i] + break + } + } + if instanceSet == nil { + return errors.WithStack( + errors.New("unable to determine the proper instance set for the restore")) + } + + // If the cluster is already bootstrapped, or if the bootstrap Job is complete, then + // nothing to do. However, also ensure the "data sources initialized" condition is set + // to true if for some reason it doesn't exist (e.g. if it was deleted since the + // data source for the cluster was initialized). + if patroni.ClusterBootstrapped(cluster) { + condition := meta.FindStatusCondition(cluster.Status.Conditions, + ConditionPostgresDataInitialized) + if condition == nil || (condition.Status != metav1.ConditionTrue) { + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: cluster.GetGeneration(), + Type: ConditionPostgresDataInitialized, + Status: metav1.ConditionTrue, + Reason: "ClusterAlreadyBootstrapped", + Message: "The cluster is already bootstrapped", + }) + } + return nil + } + + if err := r.createRestoreConfig(ctx, cluster, configHash); err != nil { + return err + } + + // TODO(benjaminjb): Is there a way to check that a repo exists outside of spinning + // up a pod with pgBackRest and checking? + + // Define a fake STS to use when calling the reconcile functions below since when + // bootstrapping the cluster it will not exist until after the restore is complete. + fakeSTS := &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{ + Name: instanceName, + Namespace: cluster.GetNamespace(), + }} + // Reconcile the PGDATA and WAL volumes for the restore + pgdata, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes, nil) if err != nil { return errors.WithStack(err) } @@ -1425,120 +1735,265 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, return errors.WithStack(err) } + // TODO(benjaminjb): do we really need this for cloud-based datasources? + pgtablespaces, err := r.reconcileTablespaceVolumes(ctx, cluster, instanceSet, fakeSTS, clusterVolumes) + if err != nil { + return errors.WithStack(err) + } + + // The `reconcileRestoreJob` was originally designed to take a PostgresClusterDataSource + // and rather than reconfigure that func's signature, we translate the PGBackRestDataSource + tmpDataSource := &v1beta1.PostgresClusterDataSource{ + RepoName: dataSource.Repo.Name, + Options: dataSource.Options, + Resources: dataSource.Resources, + Affinity: dataSource.Affinity, + Tolerations: dataSource.Tolerations, + PriorityClassName: dataSource.PriorityClassName, + } + // reconcile the pgBackRest restore Job to populate the cluster's data directory - if err := r.reconcileRestoreJob(ctx, cluster, sourceCluster, pgdata, pgwal, dataSource, - instanceName, instanceSetName, configHash); err != nil { + // Note that the 'source cluster' is nil as this is not used by this restore type. 
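The already-bootstrapped path above only backfills the "data initialized" condition when it is missing or not true. A small sketch of that check using the apimachinery condition helpers; the condition type string here is illustrative:

package sketch

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const conditionDataInitialized = "PostgresDataInitialized" // illustrative constant

// ensureDataInitializedCondition mirrors the check above for an already
// bootstrapped cluster whose condition was lost or never recorded.
func ensureDataInitializedCondition(conditions *[]metav1.Condition, generation int64) {
	condition := meta.FindStatusCondition(*conditions, conditionDataInitialized)
	if condition == nil || condition.Status != metav1.ConditionTrue {
		meta.SetStatusCondition(conditions, metav1.Condition{
			ObservedGeneration: generation,
			Type:               conditionDataInitialized,
			Status:             metav1.ConditionTrue,
			Reason:             "ClusterAlreadyBootstrapped",
			Message:            "The cluster is already bootstrapped",
		})
	}
}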
+ if err := r.reconcileRestoreJob(ctx, cluster, nil, pgdata, pgwal, pgtablespaces, tmpDataSource, + instanceName, instanceSetName, configHash, dataSource.Stanza); err != nil { return errors.WithStack(err) } return nil } +// createRestoreConfig creates a configmap struct with pgBackRest pgbackrest.conf settings +// in the data field, for use with restoring from cloud-based data sources +func (r *Reconciler) createRestoreConfig(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, + configHash string) error { + + postgresClusterWithMockedBackups := postgresCluster.DeepCopy() + postgresClusterWithMockedBackups.Spec.Backups.PGBackRest.Global = postgresCluster.Spec. + DataSource.PGBackRest.Global + postgresClusterWithMockedBackups.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + postgresCluster.Spec.DataSource.PGBackRest.Repo, + } + + return r.reconcilePGBackRestConfig(ctx, postgresClusterWithMockedBackups, + "", configHash, "", "", []string{}) +} + // copyRestoreConfiguration copies pgBackRest configuration from another cluster for use by // the current PostgresCluster (e.g. when restoring across namespaces, and the configuration // for the source cluster needs to be copied into the PostgresCluster's local namespace). func (r *Reconciler) copyRestoreConfiguration(ctx context.Context, - cluster, sourceCluster *v1beta1.PostgresCluster, sourceClusterInstance string) error { + cluster, sourceCluster *v1beta1.PostgresCluster, +) error { + var err error - origSourceCluster := sourceCluster.DeepCopy() - sourceCluster.ObjectMeta.Name = cluster.GetName() + "-restore" - sourceCluster.ObjectMeta.Namespace = cluster.GetNamespace() - sourceCluster.ObjectMeta.Labels = cluster.GetLabels() - sourceCluster.ObjectMeta.Annotations = cluster.GetAnnotations() - var repoHostName string - if pgbackrest.DedicatedRepoHostEnabled(sourceCluster) { - repoHosts := &appsv1.StatefulSetList{} - selector := naming.PGBackRestDedicatedSelector(origSourceCluster.GetName()) - if err := r.Client.List(ctx, repoHosts, - client.InNamespace(origSourceCluster.GetNamespace()), - client.MatchingLabelsSelector{Selector: selector}); err != nil { - return errors.WithStack(err) - } - if len(repoHosts.Items) != 1 { - return errors.WithStack(errors.New("Invalid number of repo hosts found " + - "while reconciling restore job")) - } - repoHostName = repoHosts.Items[0].GetName() + sourceConfig := &corev1.ConfigMap{ObjectMeta: naming.PGBackRestConfig(sourceCluster)} + if err == nil { + err = errors.WithStack( + r.Client.Get(ctx, client.ObjectKeyFromObject(sourceConfig), sourceConfig)) } - sourceSSHConfig := &v1.Secret{} - if pgbackrest.DedicatedRepoHostEnabled(origSourceCluster) { - if err := r.Client.Get(ctx, - naming.AsObjectKey(naming.PGBackRestSSHSecret(origSourceCluster)), - sourceSSHConfig); err != nil { - return errors.WithStack(err) + + // Retrieve the pgBackRest Secret of the source cluster if it has one. When + // it does not, indicate that with a nil pointer. + sourceSecret := &corev1.Secret{ObjectMeta: naming.PGBackRestSecret(sourceCluster)} + if err == nil { + err = errors.WithStack( + r.Client.Get(ctx, client.ObjectKeyFromObject(sourceSecret), sourceSecret)) + + if apierrors.IsNotFound(err) { + sourceSecret, err = nil, nil } } - metadata := naming.PGBackRestSSHSecret(sourceCluster) - // label according to PostgresCluster being created (not the source cluster) - metadata.Labels = naming.Merge(cluster.Spec.Metadata.GetLabelsOrNil(), + + // See also [pgbackrest.CreatePGBackRestConfigMapIntent]. 
+ config := &corev1.ConfigMap{ObjectMeta: naming.PGBackRestConfig(cluster)} + config.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + + config.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + cluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil(), + ) + config.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), cluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), - naming.PGBackRestRestoreConfigLabels(cluster.GetName()), + naming.PGBackRestConfigLabels(cluster.GetName()), ) - metadata.Annotations = naming.Merge( + if err == nil { + err = r.setControllerReference(cluster, config) + } + + // See also [Reconciler.reconcilePGBackRestSecret]. + secret := &corev1.Secret{ObjectMeta: naming.PGBackRestSecret(cluster)} + secret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) + secret.Type = corev1.SecretTypeOpaque + + secret.Annotations = naming.Merge( cluster.Spec.Metadata.GetAnnotationsOrNil(), - cluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil()) - restoreSSHConfig := &v1.Secret{ - ObjectMeta: metadata, - Data: sourceSSHConfig.Data, + cluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil(), + ) + secret.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + cluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), + naming.PGBackRestConfigLabels(cluster.Name), + ) + if err == nil { + err = r.setControllerReference(cluster, secret) } - // set ownership refs according to PostgresCluster being created (not the source cluster) - if err := r.setOwnerReference(cluster, restoreSSHConfig); err != nil { - return errors.WithStack(err) + if err == nil { + pgbackrest.RestoreConfig( + sourceConfig, config, + sourceSecret, secret, + ) } - restoreSSHConfig.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Secret")) - // Create metadata that can be used to override metadata (labels, annotations and ownership - // refs) in pgBackRest configuration resources. This allows us to copy resources from - // another cluster, but ensure pertinent metadata details are set according to the cluster - // currently being reconciled (ensuring proper garbage collection, etc.). - overrideMetadata := &metav1.ObjectMeta{ - Annotations: metadata.GetAnnotations(), - Labels: metadata.GetLabels(), - OwnerReferences: restoreSSHConfig.OwnerReferences, - } - if err := r.reconcilePGBackRestConfig(ctx, sourceCluster, overrideMetadata, repoHostName, "", - naming.ClusterPodService(origSourceCluster).Name, origSourceCluster.GetNamespace(), - []string{sourceClusterInstance}, restoreSSHConfig); err != nil { - return errors.WithStack(err) + if err == nil { + err = errors.WithStack(r.apply(ctx, config)) } - return nil + // Write the Secret when there is something we want to keep in it. + if err == nil && len(secret.Data) != 0 { + err = errors.WithStack(r.apply(ctx, secret)) + } + + // copy any needed projected Secrets or ConfigMaps + if err == nil { + err = r.copyConfigurationResources(ctx, cluster, sourceCluster) + } + + return err } -// reconcileRepoHosts is responsible for reconciling the pgBackRest ConfigMaps and Secrets. 
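The copy logic above threads every step through "err == nil" and treats a missing source Secret as optional by clearing both the pointer and the error. A standalone sketch of that flow with placeholder object names:

package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// fetchSourceConfig gets the source ConfigMap (required) and Secret (optional).
// A missing Secret is signalled by returning a nil pointer, mirroring the
// pattern above.
func fetchSourceConfig(ctx context.Context, c client.Client, ns string) (*corev1.ConfigMap, *corev1.Secret, error) {
	var err error

	config := &corev1.ConfigMap{}
	config.Namespace, config.Name = ns, "source-pgbackrest-config" // placeholder name
	if err == nil {
		err = c.Get(ctx, client.ObjectKeyFromObject(config), config)
	}

	secret := &corev1.Secret{}
	secret.Namespace, secret.Name = ns, "source-pgbackrest" // placeholder name
	if err == nil {
		err = c.Get(ctx, client.ObjectKeyFromObject(secret), secret)
		if apierrors.IsNotFound(err) {
			secret, err = nil, nil // Secret is optional
		}
	}

	return config, secret, err
}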
-// -// Please note that while the metadata for any resources generated within this function is -// typically generated to the PostgresCluster provided, an optional metadataOverride -// parameter can also be provided that can be used to override the labels, annotations and/or -// ownerships refs for any resources created by this function (note that all other fields in -// metadataOverride are ignored). This is useful in scenarios where the contents of the -// configuration resources should be reconciled according to the PostgresCluster provided, -// but those same resources need to be labeled, owned, etc. independently of that PostgresCluster -// (e.g. according to another cluster, such as when performing a restore across namespaces and -// copying configuration from a source cluster). -func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, - postgresCluster *v1beta1.PostgresCluster, metadataOverride *metav1.ObjectMeta, - repoHostName, configHash, serviceName, serviceNamespace string, - instanceNames []string, sshSecret *v1.Secret) error { +// copyConfigurationResources copies all pgBackRest configuration ConfigMaps and +// Secrets used by the source cluster when bootstrapping the new cluster using +// pgBackRest restore. This ensures those configuration resources mounted as +// VolumeProjections by the source cluster can be used by the new cluster during +// bootstrapping. +func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, + sourceCluster *v1beta1.PostgresCluster) error { + + for i := range sourceCluster.Spec.Backups.PGBackRest.Configuration { + // While all volume projections from .Configuration will be carried over to + // the pgBackRest restore Job, we only explicitly copy the relevant ConfigMaps + // and Secrets. Any DownwardAPI or ServiceAccountToken projections will need + // to be handled manually. + // - https://kubernetes.io/docs/concepts/storage/projected-volumes/ + if sourceCluster.Spec.Backups.PGBackRest.Configuration[i].Secret != nil { + secretProjection := sourceCluster.Spec.Backups.PGBackRest.Configuration[i].Secret + secretCopy := &corev1.Secret{} + secretName := types.NamespacedName{ + Name: secretProjection.Name, + Namespace: sourceCluster.Namespace, + } + // Get the existing Secret for the copy, if it exists. It **must** + // exist if not configured as optional. + if secretProjection.Optional != nil && *secretProjection.Optional { + if err := errors.WithStack(r.Client.Get(ctx, secretName, + secretCopy)); apierrors.IsNotFound(err) { + continue + } else { + return err + } + } else { + if err := errors.WithStack( + r.Client.Get(ctx, secretName, secretCopy)); err != nil { + return err + } + } + // Set a unique name for the Secret copy using the original Secret + // name and the Secret projection index number. 
+ secretCopyName := fmt.Sprintf(naming.RestoreConfigCopySuffix, secretProjection.Name, i) + + // set the new name and namespace + secretCopy.ObjectMeta = metav1.ObjectMeta{ + Name: secretCopyName, + Namespace: cluster.Namespace, + } + secretCopy.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) + secretCopy.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + cluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil(), + ) + secretCopy.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + cluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), + // this label allows for cleanup when the restore completes + naming.PGBackRestRestoreJobLabels(cluster.Name), + ) + if err := r.setControllerReference(cluster, secretCopy); err != nil { + return err + } - log := logging.FromContext(ctx).WithValues("reconcileResource", "repoConfig") - errMsg := "reconciling pgBackRest configuration" + if err := errors.WithStack(r.apply(ctx, secretCopy)); err != nil { + return err + } + // update the copy of the source PostgresCluster to add the new Secret + // projection(s) to the restore Job + sourceCluster.Spec.Backups.PGBackRest.Configuration[i].Secret.Name = secretCopyName + } - // create a function that can be used to override metadata according to the metadataOverride - // parameter provided - overrideMetadata := func(metadata metav1.ObjectMeta) metav1.ObjectMeta { - name := metadata.Name - namespace := metadata.Namespace - metadata = *metadataOverride - metadata.Name = name - metadata.Namespace = namespace - return metadata + if sourceCluster.Spec.Backups.PGBackRest.Configuration[i].ConfigMap != nil { + configMapProjection := sourceCluster.Spec.Backups.PGBackRest.Configuration[i].ConfigMap + configMapCopy := &corev1.ConfigMap{} + configMapName := types.NamespacedName{ + Name: configMapProjection.Name, + Namespace: sourceCluster.Namespace, + } + // Get the existing ConfigMap for the copy, if it exists. It **must** + // exist if not configured as optional. + if configMapProjection.Optional != nil && *configMapProjection.Optional { + if err := errors.WithStack(r.Client.Get(ctx, configMapName, + configMapCopy)); apierrors.IsNotFound(err) { + continue + } else { + return err + } + } else { + if err := errors.WithStack( + r.Client.Get(ctx, configMapName, configMapCopy)); err != nil { + return err + } + } + // Set a unique name for the ConfigMap copy using the original ConfigMap + // name and the ConfigMap projection index number. 
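A sketch of re-homing a copied object as done above: the copy keeps only the source data, gets a fresh ObjectMeta in the target namespace, and carries labels that allow cleanup once the restore Job finishes (the label map here is a placeholder for the operator's restore-job labels):

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// rehomeSecretCopy returns a copy of source that can be applied in the target
// namespace: a new name, no stale UID or resourceVersion, and cleanup labels.
func rehomeSecretCopy(source *corev1.Secret, name, namespace string, cleanupLabels map[string]string) *corev1.Secret {
	out := source.DeepCopy()
	out.ObjectMeta = metav1.ObjectMeta{
		Name:      name,
		Namespace: namespace,
		Labels:    cleanupLabels, // e.g. a restore-job label used for garbage collection
	}
	return out
}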
+ configMapCopyName := fmt.Sprintf(naming.RestoreConfigCopySuffix, configMapProjection.Name, i) + + // set the new name and namespace + configMapCopy.ObjectMeta = metav1.ObjectMeta{ + Name: configMapCopyName, + Namespace: cluster.Namespace, + } + configMapCopy.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + configMapCopy.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + cluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil(), + ) + configMapCopy.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + cluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), + // this label allows for cleanup when the restore completes + naming.PGBackRestRestoreJobLabels(cluster.Name), + ) + if err := r.setControllerReference(cluster, configMapCopy); err != nil { + return err + } + if err := errors.WithStack(r.apply(ctx, configMapCopy)); err != nil { + return err + } + // update the copy of the source PostgresCluster to add the new ConfigMap + // projection(s) to the restore Job + sourceCluster.Spec.Backups.PGBackRest.Configuration[i].ConfigMap.Name = configMapCopyName + } } + return nil +} + +// reconcilePGBackRestConfig is responsible for reconciling the pgBackRest ConfigMaps and Secrets. +func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, + postgresCluster *v1beta1.PostgresCluster, + repoHostName, configHash, serviceName, serviceNamespace string, + instanceNames []string) error { backrestConfig := pgbackrest.CreatePGBackRestConfigMapIntent(postgresCluster, repoHostName, configHash, serviceName, serviceNamespace, instanceNames) - if metadataOverride != nil { - backrestConfig.ObjectMeta = overrideMetadata(backrestConfig.ObjectMeta) - } else if err := controllerutil.SetControllerReference(postgresCluster, backrestConfig, + if err := controllerutil.SetControllerReference(postgresCluster, backrestConfig, r.Client.Scheme()); err != nil { return err } @@ -1546,56 +2001,65 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, return errors.WithStack(err) } - repoHostConfigured := pgbackrest.DedicatedRepoHostEnabled(postgresCluster) - if !repoHostConfigured { - log.V(1).Info("skipping SSH reconciliation, no repo hosts configured") - return nil - } + return nil +} - sshdConfig := pgbackrest.CreateSSHConfigMapIntent(postgresCluster) - if metadataOverride != nil { - sshdConfig.ObjectMeta = overrideMetadata(sshdConfig.ObjectMeta) - } else if err := controllerutil.SetControllerReference(postgresCluster, &sshdConfig, - r.Client.Scheme()); err != nil { - log.Error(err, errMsg) - return err - } - if err := r.apply(ctx, &sshdConfig); err != nil { - log.Error(err, errMsg) - return err - } +// +kubebuilder:rbac:groups="",resources="secrets",verbs={get} +// +kubebuilder:rbac:groups="",resources="secrets",verbs={create,delete,patch} - sshdSecret, err := pgbackrest.CreateSSHSecretIntent(postgresCluster, sshSecret, - serviceName, serviceNamespace) - if err != nil { - log.Error(err, errMsg) - return err +// reconcilePGBackRestSecret reconciles the pgBackRest Secret. 
+func (r *Reconciler) reconcilePGBackRestSecret(ctx context.Context, + cluster *v1beta1.PostgresCluster, repoHost *appsv1.StatefulSet, + rootCA *pki.RootCertificateAuthority) error { + + intent := &corev1.Secret{ObjectMeta: naming.PGBackRestSecret(cluster)} + intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) + intent.Type = corev1.SecretTypeOpaque + + intent.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + cluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil()) + intent.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + cluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), + naming.PGBackRestConfigLabels(cluster.Name), + ) + + existing := &corev1.Secret{} + err := errors.WithStack(client.IgnoreNotFound( + r.Client.Get(ctx, client.ObjectKeyFromObject(intent), existing))) + + if err == nil { + err = r.setControllerReference(cluster, intent) } - if metadataOverride != nil { - sshdSecret.ObjectMeta = overrideMetadata(sshdSecret.ObjectMeta) - } else if err := controllerutil.SetControllerReference(postgresCluster, &sshdSecret, - r.Client.Scheme()); err != nil { - return err + if err == nil { + err = pgbackrest.Secret(ctx, cluster, repoHost, rootCA, existing, intent) } - if err := r.apply(ctx, &sshdSecret); err != nil { - log.Error(err, errMsg) - return err + + // Delete the Secret when it exists and there is nothing we want to keep in it. + if err == nil && len(existing.UID) != 0 && len(intent.Data) == 0 { + err = errors.WithStack(client.IgnoreNotFound( + r.deleteControlled(ctx, cluster, existing))) } - return nil + // Write the Secret when there is something we want to keep in it. + if err == nil && len(intent.Data) != 0 { + err = errors.WithStack(r.apply(ctx, intent)) + } + return err } -// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=create;patch -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=create;patch -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;patch +// +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs={create,patch} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="roles",verbs={create,patch} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={create,patch} // reconcileInstanceRBAC reconciles the Role, RoleBinding, and ServiceAccount for // pgBackRest func (r *Reconciler) reconcilePGBackRestRBAC(ctx context.Context, - postgresCluster *v1beta1.PostgresCluster) (*v1.ServiceAccount, error) { + postgresCluster *v1beta1.PostgresCluster) (*corev1.ServiceAccount, error) { - sa := &v1.ServiceAccount{ObjectMeta: naming.PGBackRestRBAC(postgresCluster)} - sa.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("ServiceAccount")) + sa := &corev1.ServiceAccount{ObjectMeta: naming.PGBackRestRBAC(postgresCluster)} + sa.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ServiceAccount")) role := &rbacv1.Role{ObjectMeta: naming.PGBackRestRBAC(postgresCluster)} role.SetGroupVersionKind(rbacv1.SchemeGroupVersion.WithKind("Role")) @@ -1657,7 +2121,8 @@ func (r *Reconciler) reconcilePGBackRestRBAC(ctx context.Context, // StatefulSet according to a specific PostgresCluster custom resource. 
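A sketch of the "delete when empty, write when populated" Secret handling above, using plain controller-runtime calls where the reconciler uses its own apply and deleteControlled helpers (field manager name is a placeholder):

package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// writeOrDeleteSecret keeps the Secret only while it has data worth keeping,
// mirroring the flow above.
func writeOrDeleteSecret(ctx context.Context, c client.Client, existing, intent *corev1.Secret) error {
	// Server-side apply needs apiVersion/kind on the intent.
	intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret"))

	// Nothing to keep: remove the existing Secret, tolerating "already gone".
	if len(existing.UID) != 0 && len(intent.Data) == 0 {
		return client.IgnoreNotFound(c.Delete(ctx, existing))
	}
	// Something to keep: apply the intended Secret.
	if len(intent.Data) != 0 {
		return c.Patch(ctx, intent, client.Apply,
			client.ForceOwnership, client.FieldOwner("example-field-manager"))
	}
	return nil
}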
func (r *Reconciler) reconcileDedicatedRepoHost(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, - repoResources *RepoResources) (*appsv1.StatefulSet, error) { + repoResources *RepoResources, + observedInstances *observedInstances) (*appsv1.StatefulSet, error) { log := logging.FromContext(ctx).WithValues("reconcileResource", "repoHost") @@ -1697,7 +2162,8 @@ func (r *Reconciler) reconcileDedicatedRepoHost(ctx context.Context, }) } repoHostName := repoResources.hosts[0].Name - repoHost, err := r.applyRepoHostIntent(ctx, postgresCluster, repoHostName, repoResources) + repoHost, err := r.applyRepoHostIntent(ctx, postgresCluster, repoHostName, repoResources, + observedInstances) if err != nil { log.Error(err, "reconciling repository host") return nil, err @@ -1706,20 +2172,20 @@ func (r *Reconciler) reconcileDedicatedRepoHost(ctx context.Context, postgresCluster.Status.PGBackRest.RepoHost = getRepoHostStatus(repoHost) if isCreate { - r.Recorder.Eventf(postgresCluster, v1.EventTypeNormal, EventRepoHostCreated, + r.Recorder.Eventf(postgresCluster, corev1.EventTypeNormal, EventRepoHostCreated, "created pgBackRest repository host %s/%s", repoHost.TypeMeta.Kind, repoHostName) } return repoHost, nil } -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=create;patch;delete +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={create,patch,delete} // reconcileManualBackup is responsible for reconciling pgBackRest backups that are initiated // manually by the end-user func (r *Reconciler) reconcileManualBackup(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, manualBackupJobs []*batchv1.Job, - serviceAccount *v1.ServiceAccount, instances *observedInstances) error { + serviceAccount *corev1.ServiceAccount, instances *observedInstances) error { manualAnnotation := postgresCluster.GetAnnotations()[naming.PGBackRestBackup] manualStatus := postgresCluster.Status.PGBackRest.ManualBackup @@ -1793,7 +2259,7 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, // // TODO (andrewlecuyer): Since reconciliation doesn't currently occur when a leader is elected, // the operator may not get another chance to create the backup if a writable instance is not - // detected, and it then returns without requeing. To ensure this doesn't occur and that the + // detected, and it then returns without requeuing. To ensure this doesn't occur and that the // operator always has a chance to reconcile when an instance becomes writable, we should watch // Pods in the cluster for leader election events, and trigger reconciles accordingly. if !clusterWritable || manualAnnotation == "" || @@ -1807,13 +2273,11 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, manualStatus = &v1beta1.PGBackRestJobStatus{ ID: manualAnnotation, } - // TODO: remove guard with move to controller-runtime 0.9.0 https://issue.k8s.io/99714 - if len(postgresCluster.Status.Conditions) > 0 { - // Remove an existing manual backup condition if present. It will be - // created again as needed based on the newly reconciled backup Job. - meta.RemoveStatusCondition(&postgresCluster.Status.Conditions, - ConditionManualBackupSuccessful) - } + // Remove an existing manual backup condition if present. It will be + // created again as needed based on the newly reconciled backup Job. 
+ meta.RemoveStatusCondition(&postgresCluster.Status.Conditions, + ConditionManualBackupSuccessful) + postgresCluster.Status.PGBackRest.ManualBackup = manualStatus } @@ -1823,20 +2287,18 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, return nil } - // determine if the dedicated repository host is ready (if enabled) using the repo host ready + // determine if the dedicated repository host is ready using the repo host ready // condition, and return if not - if pgbackrest.DedicatedRepoHostEnabled(postgresCluster) { - condition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) - if condition == nil || condition.Status != metav1.ConditionTrue { - return nil - } + repoCondition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) + if repoCondition == nil || repoCondition.Status != metav1.ConditionTrue { + return nil } // Determine if the replica create backup is complete and return if not. This allows for proper // orchestration of backup Jobs since only one backup can be run at a time. - condition := meta.FindStatusCondition(postgresCluster.Status.Conditions, + backupCondition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionReplicaCreate) - if condition == nil || condition.Status != metav1.ConditionTrue { + if backupCondition == nil || backupCondition.Status != metav1.ConditionTrue { return nil } @@ -1854,44 +2316,43 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, } } if !statusFound { - r.Recorder.Eventf(postgresCluster, v1.EventTypeWarning, "InvalidBackupRepo", + r.Recorder.Eventf(postgresCluster, corev1.EventTypeWarning, "InvalidBackupRepo", "Unable to find status for %q as configured for a manual backup. Please ensure "+ "this repo is defined in the spec.", repoName) return nil } if !stanzaCreated { - r.Recorder.Eventf(postgresCluster, v1.EventTypeWarning, "StanzaNotCreated", + r.Recorder.Eventf(postgresCluster, corev1.EventTypeWarning, "StanzaNotCreated", "Stanza not created for %q as specified for a manual backup", repoName) return nil } + var repo v1beta1.PGBackRestRepo + for i := range postgresCluster.Spec.Backups.PGBackRest.Repos { + if postgresCluster.Spec.Backups.PGBackRest.Repos[i].Name == repoName { + repo = postgresCluster.Spec.Backups.PGBackRest.Repos[i] + } + } + if repo.Name == "" { + return errors.Errorf("repo %q is not defined for this cluster", repoName) + } + // Users should specify the repo for the command using the "manual.repoName" field in the spec, // and not using the "--repo" option in the "manual.options" field. Therefore, record a // warning event and return if a "--repo" option is found. Reconciliation will then be // reattempted when "--repo" is removed from "manual.options" and the spec is updated. + // Since '--repo' can be set with or without an equals ('=') sign, we check for both + // usage patterns. 
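The check that follows only needs a substring test because pgBackRest accepts the flag both as "--repo=N" and as "--repo N". A small illustrative helper, not part of the patch, capturing the same rule:

package sketch

import "strings"

// containsRepoOption reports whether any manual backup option tries to set the
// pgBackRest --repo flag, in either its "--repo=N" or "--repo N" form. The
// repository should instead be selected with spec.backups.pgbackrest.manual.repoName.
func containsRepoOption(options []string) bool {
	for _, opt := range options {
		if strings.Contains(opt, "--repo=") || strings.Contains(opt, "--repo ") {
			return true
		}
	}
	return false
}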
backupOpts := postgresCluster.Spec.Backups.PGBackRest.Manual.Options for _, opt := range backupOpts { - if strings.Contains(opt, "--repo") { - r.Recorder.Eventf(postgresCluster, v1.EventTypeWarning, "InvalidManualBackup", + if strings.Contains(opt, "--repo=") || strings.Contains(opt, "--repo ") { + r.Recorder.Eventf(postgresCluster, corev1.EventTypeWarning, "InvalidManualBackup", "Option '--repo' is not allowed: please use the 'repoName' field instead.", repoName) return nil } } - // get pod name and container name as needed to exec into the proper pod and create - // the pgBackRest backup - selector, containerName, err := getPGBackRestExecSelector(postgresCluster, repoName) - if err != nil { - return errors.WithStack(err) - } - - // set the name of the pgbackrest config file that will be mounted to the backup Job - configName := pgbackrest.CMInstanceKey - if containerName == naming.PGBackRestRepoContainerName { - configName = pgbackrest.CMRepoKey - } - // create the backup Job backupJob := &batchv1.Job{} backupJob.ObjectMeta = naming.PGBackRestBackupJob(postgresCluster) @@ -1912,11 +2373,9 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, backupJob.ObjectMeta.Labels = labels backupJob.ObjectMeta.Annotations = annotations - spec, err := generateBackupJobSpecIntent(postgresCluster, selector.String(), containerName, - repoName, serviceAccount.GetName(), configName, labels, annotations, backupOpts...) - if err != nil { - return errors.WithStack(err) - } + spec := generateBackupJobSpecIntent(ctx, postgresCluster, repo, + serviceAccount.GetName(), labels, annotations, backupOpts...) + backupJob.Spec = *spec // set gvk and ownership refs @@ -1934,18 +2393,19 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, return nil } -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=create;patch;delete +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={create,patch,delete} // reconcileReplicaCreateBackup is responsible for reconciling a full pgBackRest backup for the // cluster as required to create replicas func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, instances *observedInstances, replicaCreateBackupJobs []*batchv1.Job, - serviceAccount *v1.ServiceAccount, configHash, replicaCreateRepoName string) error { + serviceAccount *corev1.ServiceAccount, configHash string, + replicaCreateRepo v1beta1.PGBackRestRepo) error { var replicaCreateRepoStatus *v1beta1.RepoStatus - for i, r := range postgresCluster.Status.PGBackRest.Repos { - if r.Name == replicaCreateRepoName { + for i, repo := range postgresCluster.Status.PGBackRest.Repos { + if repo.Name == replicaCreateRepo.Name { replicaCreateRepoStatus = &postgresCluster.Status.PGBackRest.Repos[i] break } @@ -1989,7 +2449,7 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, // // TODO (andrewlecuyer): Since reconciliation doesn't currently occur when a leader is elected, // the operator may not get another chance to create the backup if a writable instance is not - // detected, and it then returns without requeing. To ensure this doesn't occur and that the + // detected, and it then returns without requeuing. To ensure this doesn't occur and that the // operator always has a chance to reconcile when an instance becomes writable, we should watch // Pods in the cluster for leader election events, and trigger reconciles accordingly. 
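The gating just below, like the manual backup path above, follows one recurring pattern: look a status condition up and require it to be True before doing any work. A sketch of that pattern using the apimachinery helpers this file already imports; the condition type names are simply the ones visible in the surrounding code.

package sketch

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// conditionTrue reports whether the named condition exists and is True.
// (apimachinery's meta.IsStatusConditionTrue performs the same nil-and-True check.)
func conditionTrue(conditions []metav1.Condition, conditionType string) bool {
	condition := meta.FindStatusCondition(conditions, conditionType)
	return condition != nil && condition.Status == metav1.ConditionTrue
}

// Example use, mirroring the gates in this file:
//
//	if !conditionTrue(cluster.Status.Conditions, ConditionRepoHostReady) ||
//		!conditionTrue(cluster.Status.Conditions, ConditionReplicaCreate) {
//		return nil // not ready yet; try again on a later reconcile
//	}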
if !clusterWritable || replicaCreateRepoStatus == nil || replicaCreateRepoStatus.ReplicaCreateBackupComplete { @@ -2003,19 +2463,6 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, replicaRepoReady = (condition.Status == metav1.ConditionTrue) } - // get pod name and container name as needed to exec into the proper pod and create - // the pgBackRest backup - selector, containerName, err := getPGBackRestExecSelector(postgresCluster, replicaCreateRepoName) - if err != nil { - return errors.WithStack(err) - } - - // set the name of the pgbackrest config file that will be mounted to the backup Job - configName := pgbackrest.CMInstanceKey - if containerName == naming.PGBackRestRepoContainerName { - configName = pgbackrest.CMRepoKey - } - // determine if the dedicated repository host is ready using the repo host ready status var dedicatedRepoReady bool condition = meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) @@ -2034,7 +2481,7 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, // determine if the replica creation repo has changed replicaCreateRepoChanged := true - if replicaCreateRepoName == job.GetLabels()[naming.LabelPGBackRestRepo] { + if replicaCreateRepo.Name == job.GetLabels()[naming.LabelPGBackRestRepo] { replicaCreateRepoChanged = false } @@ -2042,14 +2489,10 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, // - The job has failed. The Job will be deleted and recreated to try again. // - The replica creation repo has changed since the Job was created. Delete and recreate // with the Job with the proper repo configured. - // - The "config" annotation has changed, indicating there is a new primary. Delete and - // recreate the Job with the proper config mounted (applicable when a dedicated repo - // host is not enabled). // - The "config hash" annotation has changed, indicating a configuration change has been // made in the spec (specifically a change to the config for an external repo). Delete // and recreate the Job with proper hash per the current config. 
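The deletion branch that follows boils down to comparing what the existing Job recorded against the currently desired values. A stripped-down sketch of that comparison; the label and annotation keys are passed in here only to keep the sketch self-contained, whereas the real code reads naming.LabelPGBackRestRepo and naming.PGBackRestConfigHash directly.

package sketch

import batchv1 "k8s.io/api/batch/v1"

// needsRecreate reports whether an existing replica-create backup Job should be
// deleted and recreated: it failed, it targets a different repository, or the
// pgBackRest configuration hash it was built from has changed.
func needsRecreate(job *batchv1.Job, failed bool,
	repoLabel, repoName, hashAnnotation, configHash string) bool {
	return failed ||
		job.GetLabels()[repoLabel] != repoName ||
		job.GetAnnotations()[hashAnnotation] != configHash
}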
if failed || replicaCreateRepoChanged || - (job.GetAnnotations()[naming.PGBackRestCurrentConfig] != configName) || (job.GetAnnotations()[naming.PGBackRestConfigHash] != configHash) { if err := r.Client.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { @@ -2065,10 +2508,9 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, } } - dedicatedEnabled := pgbackrest.DedicatedRepoHostEnabled(postgresCluster) - // return if no job has been created and the replica repo or the dedicated repo host is not - // ready - if job == nil && ((dedicatedEnabled && !dedicatedRepoReady) || !replicaRepoReady) { + // return if no job has been created and the replica repo or the dedicated + // repo host is not ready + if job == nil && (!dedicatedRepoReady || !replicaRepoReady) { return nil } @@ -2087,17 +2529,14 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, annotations = naming.Merge(postgresCluster.Spec.Metadata.GetAnnotationsOrNil(), postgresCluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil(), map[string]string{ - naming.PGBackRestCurrentConfig: configName, - naming.PGBackRestConfigHash: configHash, + naming.PGBackRestConfigHash: configHash, }) backupJob.ObjectMeta.Labels = labels backupJob.ObjectMeta.Annotations = annotations - spec, err := generateBackupJobSpecIntent(postgresCluster, selector.String(), containerName, - replicaCreateRepoName, serviceAccount.GetName(), configName, labels, annotations) - if err != nil { - return errors.WithStack(err) - } + spec := generateBackupJobSpecIntent(ctx, postgresCluster, replicaCreateRepo, + serviceAccount.GetName(), labels, annotations) + backupJob.Spec = *spec // set gvk and ownership refs @@ -2118,24 +2557,24 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, // for the cluster func (r *Reconciler) reconcileRepos(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, extConfigHashes map[string]string, - repoResources *RepoResources) (string, error) { + repoResources *RepoResources) (v1beta1.PGBackRestRepo, error) { log := logging.FromContext(ctx).WithValues("reconcileResource", "repoVolume") errors := []error{} errMsg := "reconciling repository volume" - repoVols := []*v1.PersistentVolumeClaim{} - var replicaCreateRepoName string + repoVols := []*corev1.PersistentVolumeClaim{} + var replicaCreateRepo v1beta1.PGBackRestRepo for i, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { // the repo at index 0 is the replica creation repo if i == 0 { - replicaCreateRepoName = repo.Name + replicaCreateRepo = postgresCluster.Spec.Backups.PGBackRest.Repos[i] } // we only care about reconciling repo volumes, so ignore everything else if repo.Volume == nil { continue } - repo, err := r.applyRepoVolumeIntent(ctx, postgresCluster, &repo.Volume.VolumeClaimSpec, + repo, err := r.applyRepoVolumeIntent(ctx, postgresCluster, repo.Volume.VolumeClaimSpec, repo.Name, repoResources) if err != nil { log.Error(err, errMsg) @@ -2149,17 +2588,13 @@ func (r *Reconciler) reconcileRepos(ctx context.Context, postgresCluster.Status.PGBackRest.Repos = getRepoVolumeStatus(postgresCluster.Status.PGBackRest.Repos, repoVols, extConfigHashes, - replicaCreateRepoName) + replicaCreateRepo.Name) - if len(errors) > 0 { - return "", utilerrors.NewAggregate(errors) - } - - return replicaCreateRepoName, nil + return replicaCreateRepo, utilerrors.NewAggregate(errors) } -// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list -// 
+kubebuilder:rbac:groups="",resources=pods/exec,verbs=create +// +kubebuilder:rbac:groups="",resources="pods",verbs={get,list} +// +kubebuilder:rbac:groups="",resources="pods/exec",verbs={create} // reconcileStanzaCreate is responsible for ensuring stanzas are properly created for the // pgBackRest repositories configured for a PostgresCluster. If the bool returned from this @@ -2178,8 +2613,8 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, return } replicaCreateRepoName := postgresCluster.Spec.Backups.PGBackRest.Repos[0].Name - for i, r := range postgresCluster.Status.PGBackRest.Repos { - if r.Name == replicaCreateRepoName { + for i, repo := range postgresCluster.Status.PGBackRest.Repos { + if repo.Name == replicaCreateRepoName { replicaCreateRepoStatus = &postgresCluster.Status.PGBackRest.Repos[i] break } @@ -2239,7 +2674,7 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, // // TODO (andrewlecuyer): Since reconciliation doesn't currently occur when a leader is elected, // the operator may not get another chance to create the stanza if a writable instance is not - // detected, and it then returns without requeing. To ensure this doesn't occur and that the + // detected, and it then returns without requeuing. To ensure this doesn't occur and that the // operator always has a chance to reconcile when an instance becomes writable, we should watch // Pods in the cluster for leader election events, and trigger reconciles accordingly. if !clusterWritable || stanzasCreated { @@ -2249,13 +2684,15 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, // create a pgBackRest executor and attempt stanza creation exec := func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(postgresCluster.GetNamespace(), writableInstanceName, + return r.PodExec(ctx, postgresCluster.GetNamespace(), writableInstanceName, naming.ContainerDatabase, stdin, stdout, stderr, command...) } - configHashMismatch, err := pgbackrest.Executor(exec).StanzaCreate(ctx, configHash) + + // Always attempt to create pgBackRest stanza first + configHashMismatch, err := pgbackrest.Executor(exec).StanzaCreateOrUpgrade(ctx, configHash, postgresCluster) if err != nil { // record and log any errors resulting from running the stanza-create command - r.Recorder.Event(postgresCluster, v1.EventTypeWarning, EventUnableToCreateStanzas, + r.Recorder.Event(postgresCluster, corev1.EventTypeWarning, EventUnableToCreateStanzas, err.Error()) return false, errors.WithStack(err) @@ -2269,7 +2706,7 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, } // record an event indicating successful stanza creation - r.Recorder.Event(postgresCluster, v1.EventTypeNormal, EventStanzasCreated, + r.Recorder.Event(postgresCluster, corev1.EventTypeNormal, EventStanzasCreated, "pgBackRest stanza creation completed successfully") // if no errors then stanza(s) created successfully @@ -2280,47 +2717,8 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, return false, nil } -// getPGBackRestExecSelector returns a selector and container name that allows the proper -// Pod (along with a specific container within it) to be found within the Kubernetes -// cluster as needed to exec into the container and run a pgBackRest command. 
-func getPGBackRestExecSelector(postgresCluster *v1beta1.PostgresCluster, - repoName string) (labels.Selector, string, error) { - - var repo *v1beta1.PGBackRestRepo - for i, r := range postgresCluster.Spec.Backups.PGBackRest.Repos { - if r.Name == repoName { - repo = &postgresCluster.Spec.Backups.PGBackRest.Repos[i] - } - } - if repo == nil { - return nil, "", fmt.Errorf("repo %q is not defined for this cluster", repoName) - } - - var volumeRepo bool - if repo.Volume != nil { - volumeRepo = true - } - - var err error - var podSelector labels.Selector - var containerName string - if volumeRepo { - podSelector = naming.PGBackRestDedicatedSelector(postgresCluster.GetName()) - containerName = naming.PGBackRestRepoContainerName - } else { - primarySelector := naming.ClusterPrimary(postgresCluster.GetName()) - podSelector, err = metav1.LabelSelectorAsSelector(&primarySelector) - if err != nil { - return nil, "", err - } - containerName = naming.ContainerDatabase - } - - return podSelector, containerName, nil -} - -// getRepoHostStatus is responsible for returning the pgBackRest status for the provided pgBackRest -// repository host +// getRepoHostStatus is responsible for returning the pgBackRest status for the +// provided pgBackRest repository host func getRepoHostStatus(repoHost *appsv1.StatefulSet) *v1beta1.RepoHostStatus { repoHostStatus := &v1beta1.RepoHostStatus{} @@ -2340,7 +2738,7 @@ func getRepoHostStatus(repoHost *appsv1.StatefulSet) *v1beta1.RepoHostStatus { // existing/current status for any repos in the cluster, the repository volumes // (i.e. PVCs) reconciled for the cluster, and the hashes calculated for the configuration for any // external repositories defined for the cluster. -func getRepoVolumeStatus(repoStatus []v1beta1.RepoStatus, repoVolumes []*v1.PersistentVolumeClaim, +func getRepoVolumeStatus(repoStatus []v1beta1.RepoStatus, repoVolumes []*corev1.PersistentVolumeClaim, configHashes map[string]string, replicaCreateRepoName string) []v1beta1.RepoStatus { // the new repository status that will be generated and returned @@ -2367,8 +2765,8 @@ func getRepoVolumeStatus(repoStatus []v1beta1.RepoStatus, repoVolumes []*v1.Pers } // update binding info if needed - if rs.Bound != (rv.Status.Phase == v1.ClaimBound) { - rs.Bound = (rv.Status.Phase == v1.ClaimBound) + if rs.Bound != (rv.Status.Phase == corev1.ClaimBound) { + rs.Bound = (rv.Status.Phase == corev1.ClaimBound) } // if a different volume is detected, reset the stanza and replica create backup status @@ -2385,7 +2783,7 @@ func getRepoVolumeStatus(repoStatus []v1beta1.RepoStatus, repoVolumes []*v1.Pers } if newRepoVolStatus { updatedRepoStatus = append(updatedRepoStatus, v1beta1.RepoStatus{ - Bound: (rv.Status.Phase == v1.ClaimBound), + Bound: (rv.Status.Phase == corev1.ClaimBound), Name: repoName, VolumeName: rv.Spec.VolumeName, }) @@ -2443,7 +2841,8 @@ func getRepoVolumeStatus(repoStatus []v1beta1.RepoStatus, repoVolumes []*v1.Pers // reconcileScheduledBackups is responsible for reconciling pgBackRest backup // schedules configured in the cluster definition func (r *Reconciler) reconcileScheduledBackups( - ctx context.Context, cluster *v1beta1.PostgresCluster, sa *v1.ServiceAccount, + ctx context.Context, cluster *v1beta1.PostgresCluster, sa *corev1.ServiceAccount, + cronjobs []*batchv1.CronJob, ) bool { log := logging.FromContext(ctx).WithValues("reconcileResource", "repoCronJob") // requeue if there is an error during creation @@ -2456,21 +2855,21 @@ func (r *Reconciler) reconcileScheduledBackups( // next if the repo 
level schedule is not nil, create the CronJob. if repo.BackupSchedules.Full != nil { if err := r.reconcilePGBackRestCronJob(ctx, cluster, repo, - full, repo.BackupSchedules.Full, sa); err != nil { + full, repo.BackupSchedules.Full, sa, cronjobs); err != nil { log.Error(err, "unable to reconcile Full backup for "+repo.Name) requeue = true } } if repo.BackupSchedules.Differential != nil { if err := r.reconcilePGBackRestCronJob(ctx, cluster, repo, - differential, repo.BackupSchedules.Differential, sa); err != nil { + differential, repo.BackupSchedules.Differential, sa, cronjobs); err != nil { log.Error(err, "unable to reconcile Differential backup for "+repo.Name) requeue = true } } if repo.BackupSchedules.Incremental != nil { if err := r.reconcilePGBackRestCronJob(ctx, cluster, repo, - incremental, repo.BackupSchedules.Incremental, sa); err != nil { + incremental, repo.BackupSchedules.Incremental, sa, cronjobs); err != nil { log.Error(err, "unable to reconcile Incremental backup for "+repo.Name) requeue = true } @@ -2480,13 +2879,14 @@ func (r *Reconciler) reconcileScheduledBackups( return requeue } -// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=create;patch +// +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={create,patch} // reconcilePGBackRestCronJob creates the CronJob for the given repo, pgBackRest // backup type and schedule func (r *Reconciler) reconcilePGBackRestCronJob( ctx context.Context, cluster *v1beta1.PostgresCluster, repo v1beta1.PGBackRestRepo, - backupType string, schedule *string, serviceAccount *v1.ServiceAccount, + backupType string, schedule *string, serviceAccount *corev1.ServiceAccount, + cronjobs []*batchv1.CronJob, ) error { log := logging.FromContext(ctx).WithValues("reconcileResource", "repoCronJob") @@ -2497,9 +2897,27 @@ func (r *Reconciler) reconcilePGBackRestCronJob( labels := naming.Merge( cluster.Spec.Metadata.GetLabelsOrNil(), cluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), - naming.PGBackRestCronJobLabels(cluster.Name, repo.Name, backupType), - ) + naming.PGBackRestCronJobLabels(cluster.Name, repo.Name, backupType)) objectmeta := naming.PGBackRestCronJob(cluster, backupType, repo.Name) + + // Look for an existing CronJob by the associated Labels. If one exists, + // update the ObjectMeta accordingly. + for _, cronjob := range cronjobs { + // ignore CronJobs that are terminating + if cronjob.GetDeletionTimestamp() != nil { + continue + } + + if cronjob.GetLabels()[naming.LabelCluster] == cluster.Name && + cronjob.GetLabels()[naming.LabelPGBackRestCronJob] == backupType && + cronjob.GetLabels()[naming.LabelPGBackRestRepo] == repo.Name { + objectmeta = metav1.ObjectMeta{ + Namespace: cluster.GetNamespace(), + Name: cronjob.Name, + } + } + } + objectmeta.Labels = labels objectmeta.Annotations = annotations @@ -2529,13 +2947,13 @@ func (r *Reconciler) reconcilePGBackRestCronJob( } } if !statusFound { - r.Recorder.Eventf(cluster, v1.EventTypeWarning, "InvalidBackupRepo", + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "InvalidBackupRepo", "Unable to find status for %q as configured for a scheduled backup. 
Please ensure "+ "this repo is defined in the spec.", repo.Name) return nil } if !stanzaCreated { - r.Recorder.Eventf(cluster, v1.EventTypeWarning, "StanzaNotCreated", + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "StanzaNotCreated", "Stanza not created for %q as specified for a scheduled backup", repo.Name) return nil } @@ -2543,24 +2961,8 @@ func (r *Reconciler) reconcilePGBackRestCronJob( // set backup type (i.e. "full", "diff", "incr") backupOpts := []string{"--type=" + backupType} - // get pod name and container name as needed to exec into the proper pod and create - // the pgBackRest backup - selector, containerName, err := getPGBackRestExecSelector(cluster, repo.Name) - if err != nil { - return errors.WithStack(err) - } - - // set the name of the pgbackrest config file that will be mounted to the backup Job - configName := pgbackrest.CMInstanceKey - if containerName == naming.PGBackRestRepoContainerName { - configName = pgbackrest.CMRepoKey - } - - jobSpec, err := generateBackupJobSpecIntent(cluster, selector.String(), containerName, - repo.Name, serviceAccount.GetName(), configName, labels, annotations, backupOpts...) - if err != nil { - return errors.WithStack(err) - } + jobSpec := generateBackupJobSpecIntent(ctx, cluster, repo, + serviceAccount.GetName(), labels, annotations, backupOpts...) // Suspend cronjobs when shutdown or read-only. Any jobs that have already // started will continue. @@ -2568,12 +2970,13 @@ func (r *Reconciler) reconcilePGBackRestCronJob( suspend := (cluster.Spec.Shutdown != nil && *cluster.Spec.Shutdown) || (cluster.Spec.Standby != nil && cluster.Spec.Standby.Enabled) - pgBackRestCronJob := &batchv1beta1.CronJob{ + pgBackRestCronJob := &batchv1.CronJob{ ObjectMeta: objectmeta, - Spec: batchv1beta1.CronJobSpec{ - Schedule: *schedule, - Suspend: &suspend, - JobTemplate: batchv1beta1.JobTemplateSpec{ + Spec: batchv1.CronJobSpec{ + Schedule: *schedule, + Suspend: &suspend, + ConcurrencyPolicy: batchv1.ForbidConcurrent, + JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Annotations: annotations, Labels: labels, @@ -2591,17 +2994,108 @@ func (r *Reconciler) reconcilePGBackRestCronJob( cluster.Spec.ImagePullSecrets // set metadata - pgBackRestCronJob.SetGroupVersionKind(batchv1beta1.SchemeGroupVersion.WithKind("CronJob")) - err = errors.WithStack(r.setControllerReference(cluster, pgBackRestCronJob)) + pgBackRestCronJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("CronJob")) + err := errors.WithStack(r.setControllerReference(cluster, pgBackRestCronJob)) if err == nil { err = r.apply(ctx, pgBackRestCronJob) } if err != nil { // record and log any errors resulting from trying to create the pgBackRest backup CronJob - r.Recorder.Event(cluster, v1.EventTypeWarning, EventUnableToCreatePGBackRestCronJob, + r.Recorder.Event(cluster, corev1.EventTypeWarning, EventUnableToCreatePGBackRestCronJob, err.Error()) log.Error(err, "error when attempting to create pgBackRest CronJob") } return err } + +// BackupsEnabled checks the state of the backups (i.e., if backups are in the spec, +// if a repo-host StatefulSet exists, if the annotation permitting backup deletion exists) +// and determines whether reconciliation is allowed. +// Reconciliation of backup-related Kubernetes objects is paused if +// - a user created a cluster with backups; +// - the cluster is updated to remove backups; +// - the annotation authorizing that removal is missing. +// +// This function also returns whether the spec has a defined backups or not. 
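The gate documented above can be read as a truth table over three observations: does the spec still define backups, is the repo-host StatefulSet already gone, and is the removal annotation present. A compact sketch of just that decision, separate from the Reconciler plumbing in the function below:

package sketch

// reconciliationAllowed mirrors the two-key rule: once backups are removed from
// the spec, a surviving repo-host StatefulSet blocks backup reconciliation until
// the authorizing annotation is added.
func reconciliationAllowed(specFound, stsNotFound, annotationFound bool) bool {
	if specFound {
		return true // backups are still requested; reconcile normally
	}
	// Spec removed: proceed only if the StatefulSet is already gone or the user
	// has explicitly authorized destroying the backups.
	return stsNotFound || annotationFound
}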
+func (r *Reconciler) BackupsEnabled( + ctx context.Context, + postgresCluster *v1beta1.PostgresCluster, +) ( + backupsSpecFound bool, + backupsReconciliationAllowed bool, + err error, +) { + specFound, stsNotFound, annotationFound, err := r.ObserveBackupUniverse(ctx, postgresCluster) + + switch { + case err != nil: + case specFound: + backupsSpecFound = true + backupsReconciliationAllowed = true + case annotationFound || stsNotFound: + backupsReconciliationAllowed = true + case !annotationFound && !stsNotFound: + // Destroying backups is a two key operation: + // 1. You must remove the backups section of the spec. + // 2. You must apply an annotation to the cluster. + // The existence of a StatefulSet without the backups spec is + // evidence of key 1 being turned without key 2 being turned + // -- block reconciliation until the annotation is added. + backupsReconciliationAllowed = false + default: + backupsReconciliationAllowed = false + } + return backupsSpecFound, backupsReconciliationAllowed, err +} + +// ObserveBackupUniverse returns +// - whether the spec has backups defined; +// - whether the repo-host statefulset exists; +// - whether the cluster has the annotation authorizing backup removal. +func (r *Reconciler) ObserveBackupUniverse(ctx context.Context, + postgresCluster *v1beta1.PostgresCluster, +) ( + backupsSpecFound bool, + repoHostStatefulSetNotFound bool, + backupsRemovalAnnotationFound bool, + err error, +) { + + // Does the cluster have a blank Backups section + backupsSpecFound = !reflect.DeepEqual(postgresCluster.Spec.Backups, v1beta1.Backups{PGBackRest: v1beta1.PGBackRestArchive{}}) + + // Does the repo-host StatefulSet exist? + name := fmt.Sprintf("%s-%s", postgresCluster.GetName(), "repo-host") + existing := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: postgresCluster.Namespace, + Name: name, + }, + } + err = errors.WithStack( + r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + repoHostStatefulSetNotFound = apierrors.IsNotFound(err) + + // If we have an error that is not related to a missing repo-host StatefulSet, + // we return an error and expect the calling function to correctly stop processing. + if err != nil && !repoHostStatefulSetNotFound { + return true, false, false, err + } + + backupsRemovalAnnotationFound = authorizeBackupRemovalAnnotationPresent(postgresCluster) + + // If we have reached this point, the err is either nil or an IsNotFound error + // which we do not care about; hence, pass nil rather than the err + return backupsSpecFound, repoHostStatefulSetNotFound, backupsRemovalAnnotationFound, nil +} + +func authorizeBackupRemovalAnnotationPresent(postgresCluster *v1beta1.PostgresCluster) bool { + annotations := postgresCluster.GetAnnotations() + for annotation := range annotations { + if annotation == naming.AuthorizeBackupRemovalAnnotation { + return annotations[naming.AuthorizeBackupRemovalAnnotation] == "true" + } + } + return false +} diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index 67212ca63d..8e34dabb5e 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -1,22 +1,9 @@ -// +build envtest +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster -/* - Copyright 2021 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - import ( "context" "errors" @@ -32,11 +19,9 @@ import ( "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - kerr "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -55,6 +40,9 @@ import ( "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pgbackrest" + "github.com/crunchydata/postgres-operator/internal/pki" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -72,15 +60,15 @@ func fakePostgresCluster(clusterName, namespace, clusterUID string, Port: initialize.Int32(5432), Shutdown: initialize.Bool(false), PostgresVersion: 13, - ImagePullSecrets: []v1.LocalObjectReference{{ + ImagePullSecrets: []corev1.LocalObjectReference{{ Name: "myImagePullSecret"}, }, Image: "example.com/crunchy-postgres-ha:test", InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ Name: "instance1", - DataVolumeClaimSpec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -90,6 +78,9 @@ func fakePostgresCluster(clusterName, namespace, clusterUID string, Backups: v1beta1.Backups{ PGBackRest: v1beta1.PGBackRestArchive{ Image: "example.com/crunchy-pgbackrest:test", + Jobs: &v1beta1.BackupJobs{ + PriorityClassName: initialize.String("some-priority-class"), + }, Global: map[string]string{"repo2-test": "config", "repo3-test": "config", "repo4-test": "config"}, Repos: []v1beta1.PGBackRestRepo{{ @@ -126,22 +117,36 @@ func fakePostgresCluster(clusterName, namespace, clusterUID string, postgresCluster.Spec.Backups.PGBackRest.Repos[0] = v1beta1.PGBackRestRepo{ Name: "repo1", Volume: &v1beta1.RepoPVC{ - VolumeClaimSpec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, - Resources: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceStorage: resource.MustParse("1Gi"), + VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), }, }, }, }, 
} postgresCluster.Spec.Backups.PGBackRest.RepoHost = &v1beta1.PGBackRestRepoHost{ - Resources: corev1.ResourceRequirements{}, - Affinity: &corev1.Affinity{}, - Tolerations: []v1.Toleration{ + PriorityClassName: initialize.String("some-priority-class"), + Resources: corev1.ResourceRequirements{}, + Affinity: &corev1.Affinity{}, + Tolerations: []corev1.Toleration{ {Key: "woot"}, }, + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ + { + MaxSkew: int32(1), + TopologyKey: "fakekey", + WhenUnsatisfiable: corev1.ScheduleAnyway, + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: naming.LabelCluster, Operator: "In", Values: []string{"somename"}}, + {Key: naming.LabelData, Operator: "Exists"}, + }, + }, + }, + }, } } // always add schedule info to the first repo @@ -154,15 +159,23 @@ func fakePostgresCluster(clusterName, namespace, clusterUID string, return postgresCluster } +func fakeObservedCronJobs() []*batchv1.CronJob { + return []*batchv1.CronJob{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-cronjob", + }}} +} + func TestReconcilePGBackRest(t *testing.T) { // Garbage collector cleans up test resources before the test completes if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { t.Skip("USE_EXISTING_CLUSTER: Test fails due to garbage collection") } - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) + cfg, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 2) + r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ @@ -174,413 +187,482 @@ func TestReconcilePGBackRest(t *testing.T) { }) t.Cleanup(func() { teardownManager(cancel, t) }) - clusterName := "hippocluster" - clusterUID := "hippouid" - - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) - namespace := ns.Name - - // create a PostgresCluster to test with - postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) - - // create a service account to test with - serviceAccount, err := r.reconcilePGBackRestRBAC(ctx, postgresCluster) - assert.NilError(t, err) - assert.Assert(t, serviceAccount != nil) - - // create the 'observed' instances and set the leader - instances := &observedInstances{ - forCluster: []*Instance{{Name: "instance1", - Pods: []*v1.Pod{{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, - }, - Spec: v1.PodSpec{}, - }}, - }, {Name: "instance2"}, {Name: "instance3"}}, - } - - // set status - postgresCluster.Status = v1beta1.PostgresClusterStatus{ - Patroni: &v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, - PGBackRest: &v1beta1.PGBackRestStatus{ - RepoHost: &v1beta1.RepoHostStatus{Ready: true}, - Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, - } - - // set conditions - clusterConditions := map[string]metav1.ConditionStatus{ - ConditionRepoHostReady: metav1.ConditionTrue, - ConditionReplicaCreate: metav1.ConditionTrue, - } - for condition, status := range clusterConditions { - meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ - Type: condition, Reason: "testing", Status: status}) - } - - result, err := r.reconcilePGBackRest(ctx, postgresCluster, instances) - if err != nil || result != (reconcile.Result{}) { - 
t.Errorf("unable to reconcile pgBackRest: %v", err) - } + t.Run("run reconcile with backups defined", func(t *testing.T) { + clusterName := "hippocluster" + clusterUID := "hippouid" - // repo is the first defined repo - repo := postgresCluster.Spec.Backups.PGBackRest.Repos[0] + ns := setupNamespace(t, tClient) + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) - // test that the repo was created properly - t.Run("verify pgbackrest dedicated repo StatefulSet", func(t *testing.T) { + // create a service account to test with + serviceAccount, err := r.reconcilePGBackRestRBAC(ctx, postgresCluster) + assert.NilError(t, err) + assert.Assert(t, serviceAccount != nil) - // get the pgBackRest repo sts using the labels we expect it to have - dedicatedRepos := &appsv1.StatefulSetList{} - if err := tClient.List(ctx, dedicatedRepos, client.InNamespace(namespace), - client.MatchingLabels{ - naming.LabelCluster: clusterName, - naming.LabelPGBackRest: "", - naming.LabelPGBackRestDedicated: "", - }); err != nil { - t.Fatal(err) + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, } - repo := appsv1.StatefulSet{} - // verify that we found a repo sts as expected - if len(dedicatedRepos.Items) == 0 { - t.Fatal("Did not find a dedicated repo sts") - } else if len(dedicatedRepos.Items) > 1 { - t.Fatal("Too many dedicated repo sts's found") - } else { - repo = dedicatedRepos.Items[0] + // set status + postgresCluster.Status = v1beta1.PostgresClusterStatus{ + Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + PGBackRest: &v1beta1.PGBackRestStatus{ + RepoHost: &v1beta1.RepoHostStatus{Ready: true}, + Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, } - // verify proper number of replicas - if *repo.Spec.Replicas != 1 { - t.Errorf("%v replicas found for dedicated repo sts, expected %v", - repo.Spec.Replicas, 1) + // set conditions + clusterConditions := map[string]metav1.ConditionStatus{ + ConditionRepoHostReady: metav1.ConditionTrue, + ConditionReplicaCreate: metav1.ConditionTrue, } - - // verify proper ownership - var foundOwnershipRef bool - for _, r := range repo.GetOwnerReferences() { - if r.Kind == "PostgresCluster" && r.Name == clusterName && - r.UID == types.UID(clusterUID) { - - foundOwnershipRef = true - break - } + for condition, status := range clusterConditions { + meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ + Type: condition, Reason: "testing", Status: status}) } - if !foundOwnershipRef { - t.Errorf("did not find expected ownership references") - } + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) - // verify proper matching labels - expectedLabels := map[string]string{ - naming.LabelCluster: clusterName, - naming.LabelPGBackRest: "", - naming.LabelPGBackRestDedicated: "", - } - expectedLabelsSelector, err := metav1.LabelSelectorAsSelector( - metav1.SetAsLabelSelector(expectedLabels)) - if err != nil { - t.Error(err) - } - if !expectedLabelsSelector.Matches(labels.Set(repo.GetLabels())) { - t.Errorf("dedicated repo host is missing an expected label: found=%v, expected=%v", - repo.GetLabels(), expectedLabels) + result, err := 
r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, true) + if err != nil || result != (reconcile.Result{}) { + t.Errorf("unable to reconcile pgBackRest: %v", err) } - // Ensure Affinity Spec has been added to dedicated repo - if repo.Spec.Template.Spec.Affinity == nil { - t.Error("dedicated repo host is missing affinity spec") - } + // repo is the first defined repo + repo := postgresCluster.Spec.Backups.PGBackRest.Repos[0] - // Ensure Tolerations have been added to dedicated repo - if repo.Spec.Template.Spec.Tolerations == nil { - t.Error("dedicated repo host is missing tolerations") - } + // test that the repo was created properly + t.Run("verify pgbackrest dedicated repo StatefulSet", func(t *testing.T) { - // Ensure imagePullSecret has been added to the dedicated repo - if repo.Spec.Template.Spec.ImagePullSecrets == nil { - t.Error("image pull secret is missing tolerations") - } + // get the pgBackRest repo sts using the labels we expect it to have + dedicatedRepos := &appsv1.StatefulSetList{} + if err := tClient.List(ctx, dedicatedRepos, client.InNamespace(ns.Name), + client.MatchingLabels{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestDedicated: "", + }); err != nil { + t.Fatal(err) + } - if repo.Spec.Template.Spec.ImagePullSecrets != nil { - if repo.Spec.Template.Spec.ImagePullSecrets[0].Name != - "myImagePullSecret" { - t.Error("image pull secret name is not set correctly") + repo := appsv1.StatefulSet{} + // verify that we found a repo sts as expected + if len(dedicatedRepos.Items) == 0 { + t.Fatal("Did not find a dedicated repo sts") + } else if len(dedicatedRepos.Items) > 1 { + t.Fatal("Too many dedicated repo sts's found") + } else { + repo = dedicatedRepos.Items[0] } - } - // verify that the repohost container exists and contains the proper env vars - var repoHostContExists bool - for _, c := range repo.Spec.Template.Spec.Containers { - if c.Name == naming.PGBackRestRepoContainerName { - repoHostContExists = true + // verify proper number of replicas + if *repo.Spec.Replicas != 1 { + t.Errorf("%v replicas found for dedicated repo sts, expected %v", + repo.Spec.Replicas, 1) } - } - // now verify the proper env within the container - if !repoHostContExists { - t.Errorf("dedicated repo host is missing a container with name %s", - naming.PGBackRestRepoContainerName) - } - repoHostStatus := postgresCluster.Status.PGBackRest.RepoHost - if repoHostStatus != nil { - if repoHostStatus.APIVersion != "apps/v1" || repoHostStatus.Kind != "StatefulSet" { - t.Errorf("invalid version/kind for dedicated repo host status") + // verify proper ownership + var foundOwnershipRef bool + for _, r := range repo.GetOwnerReferences() { + if r.Kind == "PostgresCluster" && r.Name == clusterName && + r.UID == types.UID(clusterUID) { + + foundOwnershipRef = true + break + } } - } else { - t.Errorf("dedicated repo host status is missing") - } - var foundConditionRepoHostsReady bool - for _, c := range postgresCluster.Status.Conditions { - if c.Type == "PGBackRestRepoHostReady" { - foundConditionRepoHostsReady = true - break + if !foundOwnershipRef { + t.Errorf("did not find expected ownership references") } - } - if !foundConditionRepoHostsReady { - t.Errorf("status condition PGBackRestRepoHostsReady is missing") - } - events := &corev1.EventList{} - if err := wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - 
"involvedObject.name": clusterName, - "involvedObject.namespace": namespace, - "involvedObject.uid": string(clusterUID), - "reason": "RepoHostCreated", - }); err != nil { - return false, err + // verify proper matching labels + expectedLabels := map[string]string{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestDedicated: "", } - if len(events.Items) != 1 { - return false, nil + expectedLabelsSelector, err := metav1.LabelSelectorAsSelector( + metav1.SetAsLabelSelector(expectedLabels)) + if err != nil { + t.Error(err) + } + if !expectedLabelsSelector.Matches(labels.Set(repo.GetLabels())) { + t.Errorf("dedicated repo host is missing an expected label: found=%v, expected=%v", + repo.GetLabels(), expectedLabels) } - return true, nil - }); err != nil { - t.Error(err) - } - }) - - t.Run("verify pgbackrest repo volumes", func(t *testing.T) { - // get the pgBackRest repo sts using the labels we expect it to have - repoVols := &v1.PersistentVolumeClaimList{} - if err := tClient.List(ctx, repoVols, client.InNamespace(namespace), - client.MatchingLabels{ - naming.LabelCluster: clusterName, - naming.LabelPGBackRest: "", - naming.LabelPGBackRestRepoVolume: "", - }); err != nil { - t.Fatal(err) - } - assert.Assert(t, len(repoVols.Items) > 0) + template := repo.Spec.Template.DeepCopy() + + // Containers and Volumes should be populated. + assert.Assert(t, len(template.Spec.Containers) != 0) + assert.Assert(t, len(template.Spec.InitContainers) != 0) + assert.Assert(t, len(template.Spec.Volumes) != 0) + + // Ignore Containers and Volumes in the comparison below. + template.Spec.Containers = nil + template.Spec.InitContainers = nil + template.Spec.Volumes = nil + + // TODO(tjmoore4): Add additional tests to test appending existing + // topology spread constraints and spec.disableDefaultPodScheduling being + // set to true (as done in instance StatefulSet tests). 
+ assert.Assert(t, cmp.MarshalMatches(template.Spec, ` +affinity: {} +automountServiceAccountToken: false +containers: null +dnsPolicy: ClusterFirst +enableServiceLinks: false +imagePullSecrets: +- name: myImagePullSecret +priorityClassName: some-priority-class +restartPolicy: Always +schedulerName: default-scheduler +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +shareProcessNamespace: true +terminationGracePeriodSeconds: 30 +tolerations: +- key: woot +topologySpreadConstraints: +- labelSelector: + matchExpressions: + - key: postgres-operator.crunchydata.com/cluster + operator: In + values: + - somename + - key: postgres-operator.crunchydata.com/data + operator: Exists + maxSkew: 1 + topologyKey: fakekey + whenUnsatisfiable: ScheduleAnyway +- labelSelector: + matchExpressions: + - key: postgres-operator.crunchydata.com/data + operator: In + values: + - postgres + - pgbackrest + matchLabels: + postgres-operator.crunchydata.com/cluster: hippocluster + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway +- labelSelector: + matchExpressions: + - key: postgres-operator.crunchydata.com/data + operator: In + values: + - postgres + - pgbackrest + matchLabels: + postgres-operator.crunchydata.com/cluster: hippocluster + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + `)) + + // verify that the repohost container exists and contains the proper env vars + var repoHostContExists bool + for _, c := range repo.Spec.Template.Spec.Containers { + if c.Name == naming.PGBackRestRepoContainerName { + repoHostContExists = true + } + } + // now verify the proper env within the container + if !repoHostContExists { + t.Errorf("dedicated repo host is missing a container with name %s", + naming.PGBackRestRepoContainerName) + } - for _, r := range postgresCluster.Spec.Backups.PGBackRest.Repos { - if r.Volume == nil { - continue + repoHostStatus := postgresCluster.Status.PGBackRest.RepoHost + if repoHostStatus != nil { + if repoHostStatus.APIVersion != "apps/v1" || repoHostStatus.Kind != "StatefulSet" { + t.Errorf("invalid version/kind for dedicated repo host status") + } + } else { + t.Errorf("dedicated repo host status is missing") } - var foundRepoVol bool - for _, v := range repoVols.Items { - if v.GetName() == - naming.PGBackRestRepoVolume(postgresCluster, r.Name).Name { - foundRepoVol = true + + var foundConditionRepoHostsReady bool + for _, c := range postgresCluster.Status.Conditions { + if c.Type == "PGBackRestRepoHostReady" { + foundConditionRepoHostsReady = true break } } - assert.Assert(t, foundRepoVol) - } - }) + if !foundConditionRepoHostsReady { + t.Errorf("status condition PGBackRestRepoHostsReady is missing") + } + + assert.Check(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.Name, + "involvedObject.uid": clusterUID, + "reason": "RepoHostCreated", + }) + return len(events.Items) == 1, err + })) + }) - t.Run("verify pgbackrest configuration", func(t *testing.T) { + t.Run("verify pgbackrest repo volumes", func(t *testing.T) { + + // get the pgBackRest repo sts using the labels we expect it to have + repoVols := &corev1.PersistentVolumeClaimList{} + if err := tClient.List(ctx, repoVols, client.InNamespace(ns.Name), + 
client.MatchingLabels{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestRepoVolume: "", + }); err != nil { + t.Fatal(err) + } + assert.Assert(t, len(repoVols.Items) > 0) - config := &v1.ConfigMap{} - if err := tClient.Get(ctx, types.NamespacedName{ - Name: naming.PGBackRestConfig(postgresCluster).Name, - Namespace: postgresCluster.GetNamespace(), - }, config); err != nil { - assert.NilError(t, err) - } - assert.Assert(t, len(config.Data) > 0) - - var instanceConfFound, dedicatedRepoConfFound bool - for k, v := range config.Data { - if v != "" { - if k == pgbackrest.CMInstanceKey { - instanceConfFound = true - } else if k == pgbackrest.CMRepoKey { - dedicatedRepoConfFound = true + for _, r := range postgresCluster.Spec.Backups.PGBackRest.Repos { + if r.Volume == nil { + continue } - } - } - assert.Check(t, instanceConfFound) - assert.Check(t, dedicatedRepoConfFound) - - sshConfig := &v1.ConfigMap{} - if err := tClient.Get(ctx, types.NamespacedName{ - Name: naming.PGBackRestSSHConfig(postgresCluster).Name, - Namespace: postgresCluster.GetNamespace(), - }, sshConfig); err != nil { - assert.NilError(t, err) - } - assert.Assert(t, len(sshConfig.Data) > 0) - - var foundSSHConfig, foundSSHDConfig bool - for k, v := range sshConfig.Data { - if v != "" { - if k == "ssh_config" { - foundSSHConfig = true - } else if k == "sshd_config" { - foundSSHDConfig = true + var foundRepoVol bool + for _, v := range repoVols.Items { + if v.GetName() == + naming.PGBackRestRepoVolume(postgresCluster, r.Name).Name { + foundRepoVol = true + break + } } + assert.Assert(t, foundRepoVol) } - } - assert.Check(t, foundSSHConfig) - assert.Check(t, foundSSHDConfig) - - sshSecret := &v1.Secret{} - if err := tClient.Get(ctx, types.NamespacedName{ - Name: naming.PGBackRestSSHSecret(postgresCluster).Name, - Namespace: postgresCluster.GetNamespace(), - }, sshSecret); err != nil { - assert.NilError(t, err) - } - assert.Assert(t, len(sshSecret.Data) > 0) - - var foundPubKey, foundPrivKey, foundKnownHosts bool - for k, v := range sshSecret.Data { - if len(v) > 0 { - if k == "id_ecdsa.pub" { - foundPubKey = true - } else if k == "id_ecdsa" { - foundPrivKey = true - } else if k == "ssh_known_hosts" { - foundKnownHosts = true + }) + + t.Run("verify pgbackrest configuration", func(t *testing.T) { + + config := &corev1.ConfigMap{} + if err := tClient.Get(ctx, types.NamespacedName{ + Name: naming.PGBackRestConfig(postgresCluster).Name, + Namespace: postgresCluster.GetNamespace(), + }, config); err != nil { + assert.NilError(t, err) + } + assert.Assert(t, len(config.Data) > 0) + + var instanceConfFound, dedicatedRepoConfFound bool + for k, v := range config.Data { + if v != "" { + if k == pgbackrest.CMInstanceKey { + instanceConfFound = true + } else if k == pgbackrest.CMRepoKey { + dedicatedRepoConfFound = true + } } } - } - assert.Check(t, foundPubKey) - assert.Check(t, foundPrivKey) - assert.Check(t, foundKnownHosts) - }) + assert.Check(t, instanceConfFound) + assert.Check(t, dedicatedRepoConfFound) + }) - t.Run("verify pgbackrest schedule cronjob", func(t *testing.T) { + t.Run("verify pgbackrest schedule cronjob", func(t *testing.T) { - // set status - postgresCluster.Status = v1beta1.PostgresClusterStatus{ - Patroni: &v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, - PGBackRest: &v1beta1.PGBackRestStatus{ - Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, - } + // set status + postgresCluster.Status = v1beta1.PostgresClusterStatus{ + Patroni: 
v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + PGBackRest: &v1beta1.PGBackRestStatus{ + Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, + } - // set conditions - clusterConditions := map[string]metav1.ConditionStatus{ - ConditionRepoHostReady: metav1.ConditionTrue, - ConditionReplicaCreate: metav1.ConditionTrue, - } + // set conditions + clusterConditions := map[string]metav1.ConditionStatus{ + ConditionRepoHostReady: metav1.ConditionTrue, + ConditionReplicaCreate: metav1.ConditionTrue, + } - for condition, status := range clusterConditions { - meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ - Type: condition, Reason: "testing", Status: status}) - } + for condition, status := range clusterConditions { + meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ + Type: condition, Reason: "testing", Status: status}) + } - requeue := r.reconcileScheduledBackups(context.Background(), - postgresCluster, serviceAccount) - assert.Assert(t, !requeue) + requeue := r.reconcileScheduledBackups(ctx, postgresCluster, serviceAccount, fakeObservedCronJobs()) + assert.Assert(t, !requeue) - returnedCronJob := &batchv1beta1.CronJob{} - if err := tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-pgbackrest-repo1-full", - Namespace: postgresCluster.GetNamespace(), - }, returnedCronJob); err != nil { - assert.NilError(t, err) - } + returnedCronJob := &batchv1.CronJob{} + if err := tClient.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name + "-repo1-full", + Namespace: postgresCluster.GetNamespace(), + }, returnedCronJob); err != nil { + assert.NilError(t, err) + } - // check returned cronjob matches set spec - assert.Equal(t, returnedCronJob.Name, "hippocluster-pgbackrest-repo1-full") - assert.Equal(t, returnedCronJob.Spec.Schedule, testCronSchedule) - assert.Equal(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Name, - "pgbackrest") - assert.Assert(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].SecurityContext != &corev1.SecurityContext{}) + // check returned cronjob matches set spec + assert.Equal(t, returnedCronJob.Name, "hippocluster-repo1-full") + assert.Equal(t, returnedCronJob.Spec.Schedule, testCronSchedule) + assert.Equal(t, returnedCronJob.Spec.ConcurrencyPolicy, batchv1.ForbidConcurrent) + assert.Equal(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Name, + "pgbackrest") + assert.Assert(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].SecurityContext != &corev1.SecurityContext{}) - }) + }) - t.Run("verify pgbackrest schedule found", func(t *testing.T) { + t.Run("verify pgbackrest schedule found", func(t *testing.T) { - assert.Assert(t, backupScheduleFound(repo, "full")) + assert.Assert(t, backupScheduleFound(repo, "full")) - testrepo := v1beta1.PGBackRestRepo{ - Name: "repo1", - BackupSchedules: &v1beta1.PGBackRestBackupSchedules{ - Full: &testCronSchedule, - Differential: &testCronSchedule, - Incremental: &testCronSchedule, - }} + testrepo := v1beta1.PGBackRestRepo{ + Name: "repo1", + BackupSchedules: &v1beta1.PGBackRestBackupSchedules{ + Full: &testCronSchedule, + Differential: &testCronSchedule, + Incremental: &testCronSchedule, + }} - assert.Assert(t, backupScheduleFound(testrepo, "full")) - assert.Assert(t, backupScheduleFound(testrepo, "diff")) - assert.Assert(t, backupScheduleFound(testrepo, "incr")) + assert.Assert(t, backupScheduleFound(testrepo, "full")) + assert.Assert(t, 
backupScheduleFound(testrepo, "diff")) + assert.Assert(t, backupScheduleFound(testrepo, "incr")) - }) + }) + + t.Run("verify pgbackrest schedule not found", func(t *testing.T) { + + assert.Assert(t, !backupScheduleFound(repo, "notabackuptype")) + + noscheduletestrepo := v1beta1.PGBackRestRepo{Name: "repo1"} + assert.Assert(t, !backupScheduleFound(noscheduletestrepo, "full")) + + }) + + t.Run("pgbackrest schedule suspended status", func(t *testing.T) { + + returnedCronJob := &batchv1.CronJob{} + if err := tClient.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name + "-repo1-full", + Namespace: postgresCluster.GetNamespace(), + }, returnedCronJob); err != nil { + assert.NilError(t, err) + } + + t.Run("pgbackrest schedule suspended false", func(t *testing.T) { + assert.Assert(t, !*returnedCronJob.Spec.Suspend) + }) - t.Run("verify pgbackrest schedule not found", func(t *testing.T) { + t.Run("shutdown", func(t *testing.T) { + *postgresCluster.Spec.Shutdown = true + postgresCluster.Spec.Standby = nil - assert.Assert(t, !backupScheduleFound(repo, "notabackuptype")) + requeue := r.reconcileScheduledBackups(ctx, + postgresCluster, serviceAccount, fakeObservedCronJobs()) + assert.Assert(t, !requeue) - noscheduletestrepo := v1beta1.PGBackRestRepo{Name: "repo1"} - assert.Assert(t, !backupScheduleFound(noscheduletestrepo, "full")) + assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name + "-repo1-full", + Namespace: postgresCluster.GetNamespace(), + }, returnedCronJob)) + assert.Assert(t, *returnedCronJob.Spec.Suspend) + }) + + t.Run("standby", func(t *testing.T) { + *postgresCluster.Spec.Shutdown = false + postgresCluster.Spec.Standby = &v1beta1.PostgresStandbySpec{ + Enabled: true, + } + + requeue := r.reconcileScheduledBackups(ctx, + postgresCluster, serviceAccount, fakeObservedCronJobs()) + assert.Assert(t, !requeue) + + assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name + "-repo1-full", + Namespace: postgresCluster.GetNamespace(), + }, returnedCronJob)) + + assert.Assert(t, *returnedCronJob.Spec.Suspend) + }) + }) }) - t.Run("pgbackrest schedule suspended status", func(t *testing.T) { + t.Run("run reconcile with backups not defined", func(t *testing.T) { + clusterName := "hippocluster2" + clusterUID := "hippouid2" - returnedCronJob := &batchv1beta1.CronJob{} - if err := tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-pgbackrest-repo1-full", - Namespace: postgresCluster.GetNamespace(), - }, returnedCronJob); err != nil { - assert.NilError(t, err) + ns := setupNamespace(t, tClient) + // create a PostgresCluster without backups to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + postgresCluster.Spec.Backups = v1beta1.Backups{} + + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, } - t.Run("pgbackrest schedule suspended false", func(t *testing.T) { - assert.Assert(t, !*returnedCronJob.Spec.Suspend) - }) + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) - t.Run("shutdown", func(t *testing.T) { - *postgresCluster.Spec.Shutdown = true - postgresCluster.Spec.Standby = nil + result, err := r.reconcilePGBackRest(ctx, postgresCluster, 
instances, rootCA, false) + if err != nil { + t.Errorf("unable to reconcile pgBackRest: %v", err) + } + assert.Equal(t, result, reconcile.Result{}) - requeue := r.reconcileScheduledBackups(ctx, - postgresCluster, serviceAccount) - assert.Assert(t, !requeue) + t.Run("verify pgbackrest dedicated repo StatefulSet", func(t *testing.T) { - assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-pgbackrest-repo1-full", - Namespace: postgresCluster.GetNamespace(), - }, returnedCronJob)) + // Verify the sts doesn't exist + dedicatedRepos := &appsv1.StatefulSetList{} + if err := tClient.List(ctx, dedicatedRepos, client.InNamespace(ns.Name), + client.MatchingLabels{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestDedicated: "", + }); err != nil { + t.Fatal(err) + } - assert.Assert(t, *returnedCronJob.Spec.Suspend) + assert.Equal(t, len(dedicatedRepos.Items), 0) }) - t.Run("standby", func(t *testing.T) { - *postgresCluster.Spec.Shutdown = false - postgresCluster.Spec.Standby = &v1beta1.PostgresStandbySpec{ - Enabled: true, + t.Run("verify pgbackrest repo volumes", func(t *testing.T) { + + // get the pgBackRest repo sts using the labels we expect it to have + repoVols := &corev1.PersistentVolumeClaimList{} + if err := tClient.List(ctx, repoVols, client.InNamespace(ns.Name), + client.MatchingLabels{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestRepoVolume: "", + }); err != nil { + t.Fatal(err) } - requeue := r.reconcileScheduledBackups(ctx, - postgresCluster, serviceAccount) - assert.Assert(t, !requeue) + assert.Equal(t, len(repoVols.Items), 0) + }) - assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-pgbackrest-repo1-full", - Namespace: postgresCluster.GetNamespace(), - }, returnedCronJob)) + t.Run("verify pgbackrest configuration", func(t *testing.T) { - assert.Assert(t, *returnedCronJob.Spec.Suspend) + config := &corev1.ConfigMap{} + err := tClient.Get(ctx, types.NamespacedName{ + Name: naming.PGBackRestConfig(postgresCluster).Name, + Namespace: postgresCluster.GetNamespace(), + }, config) + assert.Equal(t, apierrors.IsNotFound(err), true) }) }) } @@ -591,27 +673,16 @@ func TestReconcilePGBackRestRBAC(t *testing.T) { t.Skip("USE_EXISTING_CLUSTER: Test fails due to garbage collection") } - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) - r := &Reconciler{} - ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { - r = &Reconciler{ - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(ControllerName), - Owner: ControllerName, - } - }) - t.Cleanup(func() { teardownManager(cancel, t) }) + ctx := context.Background() + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} clusterName := "hippocluster" clusterUID := "hippouid" - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) + ns := setupNamespace(t, tClient) // create a PostgresCluster to test with postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) @@ -657,10 +728,9 @@ func TestReconcilePGBackRestRBAC(t *testing.T) { } func TestReconcileStanzaCreate(t *testing.T) { + cfg, tClient 
:= setupKubernetes(t) + require.ParallelCapacity(t, 0) - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ @@ -675,11 +745,7 @@ func TestReconcileStanzaCreate(t *testing.T) { clusterName := "hippocluster" clusterUID := "hippouid" - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) - namespace := ns.Name + ns := setupNamespace(t, tClient) // create a PostgresCluster to test with postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) @@ -698,13 +764,13 @@ func TestReconcileStanzaCreate(t *testing.T) { }, }}) - stanzaCreateFail := func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + stanzaCreateFail := func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { return errors.New("fake stanza create failed") } - stanzaCreateSuccess := func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + stanzaCreateSuccess := func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { return nil } @@ -718,27 +784,22 @@ func TestReconcileStanzaCreate(t *testing.T) { Message: "pgBackRest dedicated repository host is ready", }) - configHashMistmatch, err := r.reconcileStanzaCreate(ctx, postgresCluster, instances, "abcde12345") - assert.NilError(t, err) - assert.Assert(t, !configHashMistmatch) - - events := &corev1.EventList{} - err = wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": namespace, - "involvedObject.uid": string(clusterUID), - "reason": "StanzasCreated", - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }) + configHashMismatch, err := r.reconcileStanzaCreate(ctx, postgresCluster, instances, "abcde12345") assert.NilError(t, err) + assert.Assert(t, !configHashMismatch) + + assert.NilError(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.Name, + "involvedObject.uid": clusterUID, + "reason": "StanzasCreated", + }) + return len(events.Items) == 1, err + })) // status should indicate stanzas were created for _, r := range postgresCluster.Status.PGBackRest.Repos { @@ -758,31 +819,26 @@ func TestReconcileStanzaCreate(t *testing.T) { Reason: "RepoHostReady", Message: "pgBackRest dedicated repository host is ready", }) - postgresCluster.Status.Patroni = &v1beta1.PatroniStatus{ + postgresCluster.Status.Patroni = v1beta1.PatroniStatus{ SystemIdentifier: "6952526174828511264", } - configHashMismatch, err := r.reconcileStanzaCreate(ctx, postgresCluster, instances, "abcde12345") + configHashMismatch, err = r.reconcileStanzaCreate(ctx, postgresCluster, 
instances, "abcde12345") assert.Error(t, err, "fake stanza create failed: ") assert.Assert(t, !configHashMismatch) - events = &corev1.EventList{} - err = wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": namespace, - "involvedObject.uid": string(clusterUID), - "reason": "UnableToCreateStanzas", - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }) - assert.NilError(t, err) + assert.NilError(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.Name, + "involvedObject.uid": clusterUID, + "reason": "UnableToCreateStanzas", + }) + return len(events.Items) == 1, err + })) // status should indicate stanza were not created for _, r := range postgresCluster.Status.PGBackRest.Repos { @@ -790,90 +846,22 @@ func TestReconcileStanzaCreate(t *testing.T) { } } -func TestGetPGBackRestExecSelector(t *testing.T) { +func TestReconcileReplicaCreateBackup(t *testing.T) { + // Garbage collector cleans up test resources before the test completes + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("USE_EXISTING_CLUSTER: Test fails due to garbage collection") + } - testCases := []struct { - cluster *v1beta1.PostgresCluster - repoName string - desc string - expectedSelector string - expectedContainer string - }{{ - desc: "volume repo defined dedicated repo host enabled", - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "hippo"}, - Spec: v1beta1.PostgresClusterSpec{ - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{ - Repos: []v1beta1.PGBackRestRepo{{ - Name: "repo1", - Volume: &v1beta1.RepoPVC{}, - }}, - }, - }, - }, - }, - repoName: "repo1", - expectedSelector: "postgres-operator.crunchydata.com/cluster=hippo," + - "postgres-operator.crunchydata.com/pgbackrest=," + - "postgres-operator.crunchydata.com/pgbackrest-dedicated=", - expectedContainer: "pgbackrest", - }, { - desc: "cloud repo defined no repo host enabled", - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "hippo"}, - Spec: v1beta1.PostgresClusterSpec{ - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{ - Repos: []v1beta1.PGBackRestRepo{{ - Name: "repo1", - S3: &v1beta1.RepoS3{}, - }}, - }, - }, - }, - }, - repoName: "repo1", - expectedSelector: "postgres-operator.crunchydata.com/cluster=hippo," + - "postgres-operator.crunchydata.com/instance," + - "postgres-operator.crunchydata.com/role=master", - expectedContainer: "database", - }} + ctx := context.Background() + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 1) - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - selector, container, err := getPGBackRestExecSelector(tc.cluster, tc.repoName) - assert.NilError(t, err) - assert.Assert(t, selector.String() == tc.expectedSelector) - assert.Assert(t, container == tc.expectedContainer) - }) - } -} - -func TestReconcileReplicaCreateBackup(t *testing.T) { - - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - 
t.Cleanup(func() { teardownTestEnv(t, tEnv) }) - r := &Reconciler{} - ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { - r = &Reconciler{ - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(ControllerName), - Owner: ControllerName, - } - }) - t.Cleanup(func() { teardownManager(cancel, t) }) + r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} clusterName := "hippocluster" clusterUID := "hippouid" - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) - namespace := ns.Name + ns := setupNamespace(t, tClient) // create a PostgresCluster to test with postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) @@ -906,11 +894,11 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { Reason: "StanzaCreated", Message: "pgBackRest replica create repo is ready for backups", }) - postgresCluster.Status.Patroni = &v1beta1.PatroniStatus{ + postgresCluster.Status.Patroni = v1beta1.PatroniStatus{ SystemIdentifier: "6952526174828511264", } - replicaCreateRepo := postgresCluster.Spec.Backups.PGBackRest.Repos[0].Name + replicaCreateRepo := postgresCluster.Spec.Backups.PGBackRest.Repos[0] configHash := "abcde12345" sa := &corev1.ServiceAccount{ @@ -924,11 +912,12 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { // now find the expected job jobs := &batchv1.JobList{} err = tClient.List(ctx, jobs, &client.ListOptions{ - LabelSelector: naming.PGBackRestBackupJobSelector(clusterName, replicaCreateRepo, + Namespace: postgresCluster.Namespace, + LabelSelector: naming.PGBackRestBackupJobSelector(clusterName, replicaCreateRepo.Name, naming.BackupReplicaCreate), }) assert.NilError(t, err) - assert.Assert(t, len(jobs.Items) == 1) + assert.Equal(t, len(jobs.Items), 1, "expected 1 job") backupJob := jobs.Items[0] var foundOwnershipRef bool @@ -941,17 +930,13 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { } assert.Assert(t, foundOwnershipRef) - var foundConfigAnnotation, foundHashAnnotation bool + var foundHashAnnotation bool // verify annotations for k, v := range backupJob.GetAnnotations() { - if k == naming.PGBackRestCurrentConfig && v == pgbackrest.CMRepoKey { - foundConfigAnnotation = true - } if k == naming.PGBackRestConfigHash && v == configHash { foundHashAnnotation = true } } - assert.Assert(t, foundConfigAnnotation) assert.Assert(t, foundHashAnnotation) // verify container & env vars @@ -970,7 +955,7 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { case "CONTAINER": assert.Assert(t, env.Value == naming.PGBackRestRepoContainerName) case "NAMESPACE": - assert.Assert(t, env.Value == namespace) + assert.Assert(t, env.Value == ns.Name) case "SELECTOR": assert.Assert(t, env.Value == "postgres-operator.crunchydata.com/cluster=hippocluster,"+ "postgres-operator.crunchydata.com/pgbackrest=,"+ @@ -988,6 +973,9 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { assert.Equal(t, backupJob.Spec.Template.Spec.ImagePullSecrets[0].Name, "myImagePullSecret") + // verify the priority class + assert.Equal(t, backupJob.Spec.Template.Spec.PriorityClassName, "some-priority-class") + // now set the job to complete backupJob.Status.Conditions = append(backupJob.Status.Conditions, batchv1.JobCondition{Type: batchv1.JobComplete, Status: corev1.ConditionTrue}) @@ -1007,8 +995,8 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { // verify the status has been updated 
properly var replicaCreateRepoStatus *v1beta1.RepoStatus - for i, r := range postgresCluster.Status.PGBackRest.Repos { - if r.Name == replicaCreateRepo { + for i, repo := range postgresCluster.Status.PGBackRest.Repos { + if repo.Name == replicaCreateRepo.Name { replicaCreateRepoStatus = &postgresCluster.Status.PGBackRest.Repos[i] break } @@ -1019,12 +1007,11 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { } func TestReconcileManualBackup(t *testing.T) { + cfg, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 2) - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) r := &Reconciler{} - ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { + _, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(ControllerName), @@ -1034,11 +1021,7 @@ func TestReconcileManualBackup(t *testing.T) { }) t.Cleanup(func() { teardownManager(cancel, t) }) - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) - + ns := setupNamespace(t, tClient) defaultBackupId := "default-backup-id" backupId := metav1.Now().OpenAPISchemaFormat() @@ -1061,7 +1044,7 @@ func TestReconcileManualBackup(t *testing.T) { instances := &observedInstances{ forCluster: []*Instance{{ Name: "instance1", - Pods: []*v1.Pod{{ + Pods: []*corev1.Pod{{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, }, @@ -1345,13 +1328,14 @@ func TestReconcileManualBackup(t *testing.T) { postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), "", dedicated) postgresCluster.Spec.Backups.PGBackRest.Manual = tc.manual - postgresCluster.Status = *tc.status postgresCluster.Annotations = map[string]string{naming.PGBackRestBackup: tc.backupId} + assert.NilError(t, tClient.Create(ctx, postgresCluster)) + + postgresCluster.Status = *tc.status for condition, status := range tc.clusterConditions { meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ Type: condition, Reason: "testing", Status: status}) } - assert.NilError(t, tClient.Create(ctx, postgresCluster)) assert.NilError(t, tClient.Status().Update(ctx, postgresCluster)) currentJobs := []*batchv1.Job{} @@ -1383,6 +1367,7 @@ func TestReconcileManualBackup(t *testing.T) { jobs := &batchv1.JobList{} err := tClient.List(ctx, jobs, &client.ListOptions{ + Namespace: postgresCluster.Namespace, LabelSelector: naming.PGBackRestBackupJobSelector(clusterName, tc.manual.RepoName, naming.BackupManual), }) @@ -1400,16 +1385,11 @@ func TestReconcileManualBackup(t *testing.T) { assert.Assert(t, foundOwnershipRef) // verify image pull secret - var foundImagePullSecret bool - for _, job := range jobs.Items { - if job.Spec.Template.Spec.ImagePullSecrets != nil && - job.Spec.Template.Spec.ImagePullSecrets[0].Name == - "myImagePullSecret" { - foundImagePullSecret = true - break - } - } - assert.Assert(t, foundImagePullSecret) + assert.Assert(t, len(jobs.Items[0].Spec.Template.Spec.ImagePullSecrets) > 0) + assert.Equal(t, jobs.Items[0].Spec.Template.Spec.ImagePullSecrets[0].Name, "myImagePullSecret") + + // verify the priority class + assert.Equal(t, jobs.Items[0].Spec.Template.Spec.PriorityClassName, "some-priority-class") // verify status is populated with the proper ID assert.Assert(t, 
postgresCluster.Status.PGBackRest.ManualBackup != nil) @@ -1423,7 +1403,7 @@ func TestReconcileManualBackup(t *testing.T) { // if a deletion is expected, then an error is expected. otherwise an error is // not expected. if tc.expectCurrentJobDeletion { - assert.Assert(t, kerr.IsNotFound(err)) + assert.Assert(t, apierrors.IsNotFound(err)) assert.ErrorContains(t, err, fmt.Sprintf(`"%s" not found`, currentJobs[0].GetName())) } else { @@ -1434,6 +1414,7 @@ func TestReconcileManualBackup(t *testing.T) { // just use a pgbackrest selector to check for the existence of any job since // we might not have a repo name for tests within a manual backup defined err := tClient.List(ctx, jobs, &client.ListOptions{ + Namespace: postgresCluster.Namespace, LabelSelector: naming.PGBackRestSelector(clusterName), }) assert.NilError(t, err) @@ -1441,23 +1422,18 @@ func TestReconcileManualBackup(t *testing.T) { // if an event is expected, the check for it if tc.expectedEventReason != "" { - events := &corev1.EventList{} - err = wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": ns.GetName(), - "involvedObject.uid": string(postgresCluster.GetUID()), - "reason": tc.expectedEventReason, - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }) - assert.NilError(t, err) + assert.NilError(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.GetName(), + "involvedObject.uid": string(postgresCluster.GetUID()), + "reason": tc.expectedEventReason, + }) + return len(events.Items) == 1, err + })) } return } @@ -1472,32 +1448,18 @@ func TestGetPGBackRestResources(t *testing.T) { t.Skip("USE_EXISTING_CLUSTER: Test fails due to garbage collection") } - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) - r := &Reconciler{} - ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { - r = &Reconciler{ - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(ControllerName), - Owner: ControllerName, - } - }) - t.Cleanup(func() { teardownManager(cancel, t) }) + ctx := context.Background() + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} clusterName := "hippocluster" clusterUID := "hippouid" - - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) - namespace := ns.Name + namespace := setupNamespace(t, tClient).Name type testResult struct { - jobCount, hostCount, pvcCount int - sshConfigPresent, sshSecretPresent bool + jobCount, hostCount, pvcCount int } testCases := []struct { @@ -1516,10 +1478,10 @@ func TestGetPGBackRestResources(t *testing.T) { naming.BackupReplicaCreate), }, Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{{Name: "test", Image: "test"}}, - RestartPolicy: 
v1.RestartPolicyNever, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "test", Image: "test"}}, + RestartPolicy: corev1.RestartPolicyNever, }, }, }, @@ -1541,7 +1503,6 @@ func TestGetPGBackRestResources(t *testing.T) { }, result: testResult{ jobCount: 1, pvcCount: 0, hostCount: 0, - sshConfigPresent: false, sshSecretPresent: false, }, }, { desc: "repo no longer exists delete job", @@ -1554,10 +1515,10 @@ func TestGetPGBackRestResources(t *testing.T) { naming.BackupReplicaCreate), }, Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{{Name: "test", Image: "test"}}, - RestartPolicy: v1.RestartPolicyNever, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "test", Image: "test"}}, + RestartPolicy: corev1.RestartPolicyNever, }, }, }, @@ -1579,7 +1540,6 @@ func TestGetPGBackRestResources(t *testing.T) { }, result: testResult{ jobCount: 0, pvcCount: 0, hostCount: 0, - sshConfigPresent: false, sshSecretPresent: false, }, }, { desc: "repo still defined keep pvc", @@ -1590,9 +1550,9 @@ func TestGetPGBackRestResources(t *testing.T) { Namespace: namespace, Labels: naming.PGBackRestRepoVolumeLabels(clusterName, "repo1"), }, - Spec: v1.PersistentVolumeClaimSpec{ + Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -1619,7 +1579,6 @@ func TestGetPGBackRestResources(t *testing.T) { }, result: testResult{ jobCount: 0, pvcCount: 1, hostCount: 0, - sshConfigPresent: false, sshSecretPresent: false, }, }, { desc: "repo no longer exists delete pvc", @@ -1630,9 +1589,9 @@ func TestGetPGBackRestResources(t *testing.T) { Namespace: namespace, Labels: naming.PGBackRestRepoVolumeLabels(clusterName, "repo1"), }, - Spec: v1.PersistentVolumeClaimSpec{ + Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -1659,7 +1618,6 @@ func TestGetPGBackRestResources(t *testing.T) { }, result: testResult{ jobCount: 0, pvcCount: 0, hostCount: 0, - sshConfigPresent: false, sshSecretPresent: false, }, }, { desc: "dedicated repo host defined keep dedicated sts", @@ -1673,11 +1631,11 @@ func TestGetPGBackRestResources(t *testing.T) { Spec: appsv1.StatefulSetSpec{ Selector: metav1.SetAsLabelSelector( naming.PGBackRestDedicatedLabels(clusterName)), - Template: v1.PodTemplateSpec{ + Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: naming.PGBackRestDedicatedLabels(clusterName), }, - Spec: v1.PodSpec{}, + Spec: corev1.PodSpec{}, }, }, }, @@ -1698,62 +1656,24 @@ func TestGetPGBackRestResources(t *testing.T) { }, result: testResult{ jobCount: 0, pvcCount: 0, hostCount: 1, - sshConfigPresent: false, sshSecretPresent: false, - }, - }, { - desc: "no dedicated repo host defined delete dedicated sts", - createResources: []client.Object{ - &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "delete-dedicated", - Namespace: namespace, - Labels: naming.PGBackRestDedicatedLabels(clusterName), - }, - Spec: appsv1.StatefulSetSpec{ - Selector: metav1.SetAsLabelSelector( - 
naming.PGBackRestDedicatedLabels(clusterName)), - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: naming.PGBackRestDedicatedLabels(clusterName), - }, - Spec: v1.PodSpec{}, - }, - }, - }, - }, - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - Namespace: namespace, - UID: types.UID(clusterUID), - }, - Spec: v1beta1.PostgresClusterSpec{ - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{}, - }, - }, - }, - result: testResult{ - jobCount: 0, pvcCount: 0, hostCount: 0, - sshConfigPresent: false, sshSecretPresent: false, }, }, { - desc: "no repo host defined delete dedicated sts", + desc: "no dedicated repo host defined, dedicated sts not deleted", createResources: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "delete-dedicated-no-repo-host", + Name: "keep-dedicated-two", Namespace: namespace, Labels: naming.PGBackRestDedicatedLabels(clusterName), }, Spec: appsv1.StatefulSetSpec{ Selector: metav1.SetAsLabelSelector( naming.PGBackRestDedicatedLabels(clusterName)), - Template: v1.PodTemplateSpec{ + Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: naming.PGBackRestDedicatedLabels(clusterName), }, - Spec: v1.PodSpec{}, + Spec: corev1.PodSpec{}, }, }, }, @@ -1771,128 +1691,8 @@ func TestGetPGBackRestResources(t *testing.T) { }, }, result: testResult{ - jobCount: 0, pvcCount: 0, hostCount: 0, - sshConfigPresent: false, sshSecretPresent: false, - }, - }, { - desc: "dedicated repo host defined keep ssh configmap", - createResources: []client.Object{ - &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - // cleanup logic is sensitive the name of this resource - Name: "keep-ssh-cm-ssh-config", - Namespace: namespace, - Labels: naming.PGBackRestDedicatedLabels("keep-ssh-cm"), - }, - Data: map[string]string{}, - }, - }, - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "keep-ssh-cm", - Namespace: namespace, - UID: types.UID(clusterUID), - }, - Spec: v1beta1.PostgresClusterSpec{ - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{ - Repos: []v1beta1.PGBackRestRepo{{Volume: &v1beta1.RepoPVC{}}}, - }, - }, - }, - }, - result: testResult{ - jobCount: 0, pvcCount: 0, hostCount: 0, - sshConfigPresent: true, sshSecretPresent: false, - }, - }, { - desc: "no repo host defined keep delete configmap", - createResources: []client.Object{ - &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - // cleanup logic is sensitive the name of this resource - Name: "delete-ssh-cm-ssh-config", - Namespace: namespace, - Labels: naming.PGBackRestDedicatedLabels("delete-ssh-cm"), - }, - Data: map[string]string{}, - }, - }, - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "delete-ssh-cm", - Namespace: namespace, - UID: types.UID(clusterUID), - }, - Spec: v1beta1.PostgresClusterSpec{ - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{}, - }, - }, - }, - result: testResult{ - jobCount: 0, pvcCount: 0, hostCount: 0, - sshConfigPresent: false, sshSecretPresent: false, - }, - }, { - desc: "dedicated repo host defined keep ssh secret", - createResources: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - // cleanup logic is sensitive the name of this resource - Name: "keep-ssh-secret-ssh", - Namespace: namespace, - Labels: naming.PGBackRestDedicatedLabels("keep-ssh-secret"), - }, - Data: map[string][]byte{}, - }, - }, - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
"keep-ssh-secret", - Namespace: namespace, - UID: types.UID(clusterUID), - }, - Spec: v1beta1.PostgresClusterSpec{ - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{ - Repos: []v1beta1.PGBackRestRepo{{Volume: &v1beta1.RepoPVC{}}}, - }, - }, - }, - }, - result: testResult{ - jobCount: 0, pvcCount: 0, hostCount: 0, - sshConfigPresent: false, sshSecretPresent: true, - }, - }, { - desc: "no repo host defined keep delete secret", - createResources: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - // cleanup logic is sensitive the name of this resource - Name: "delete-ssh-secret-ssh-secret", - Namespace: namespace, - Labels: naming.PGBackRestDedicatedLabels("delete-ssh-secret"), - }, - Data: map[string][]byte{}, - }, - }, - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "delete-ssh-secret", - Namespace: namespace, - UID: types.UID(clusterUID), - }, - Spec: v1beta1.PostgresClusterSpec{ - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{}, - }, - }, - }, - result: testResult{ - jobCount: 0, pvcCount: 0, hostCount: 0, - sshConfigPresent: false, sshSecretPresent: false, + // Host count is 2 due to previous repo host sts not being deleted. + jobCount: 0, pvcCount: 0, hostCount: 2, }, }} @@ -1905,24 +1705,21 @@ func TestGetPGBackRestResources(t *testing.T) { assert.NilError(t, err) assert.NilError(t, tClient.Create(ctx, resource)) - resources, err := r.getPGBackRestResources(ctx, tc.cluster) + resources, err := r.getPGBackRestResources(ctx, tc.cluster, true) assert.NilError(t, err) assert.Assert(t, tc.result.jobCount == len(resources.replicaCreateBackupJobs)) assert.Assert(t, tc.result.hostCount == len(resources.hosts)) assert.Assert(t, tc.result.pvcCount == len(resources.pvcs)) - assert.Assert(t, tc.result.sshConfigPresent == (resources.sshConfig != nil)) - assert.Assert(t, tc.result.sshSecretPresent == (resources.sshSecret != nil)) } }) } } func TestReconcilePostgresClusterDataSource(t *testing.T) { + cfg, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 4) - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ @@ -1934,14 +1731,12 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { }) t.Cleanup(func() { teardownManager(cancel, t) }) - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) - namespace := ns.Name + namespace := setupNamespace(t, tClient).Name + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) type testResult struct { - jobCount, pvcCount int + configCount, jobCount, pvcCount int invalidSourceRepo, invalidSourceCluster, invalidOptions bool expectedClusterCondition *metav1.Condition } @@ -1963,7 +1758,7 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { sourceClusterName: "init-source", sourceClusterRepos: []v1beta1.PGBackRestRepo{{Name: "repo1"}}, result: testResult{ - jobCount: 1, pvcCount: 1, + configCount: 1, jobCount: 1, pvcCount: 1, invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: false, expectedClusterCondition: nil, }, @@ -1976,7 +1771,7 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { sourceClusterName: "the-right-source", sourceClusterRepos: 
[]v1beta1.PGBackRestRepo{{Name: "repo1"}}, result: testResult{ - jobCount: 0, pvcCount: 0, + configCount: 0, jobCount: 0, pvcCount: 0, invalidSourceRepo: false, invalidSourceCluster: true, invalidOptions: false, expectedClusterCondition: nil, }, @@ -1989,21 +1784,35 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { sourceClusterName: "invalid-repo", sourceClusterRepos: []v1beta1.PGBackRestRepo{{Name: "repo1"}}, result: testResult{ - jobCount: 0, pvcCount: 0, + configCount: 1, jobCount: 0, pvcCount: 0, invalidSourceRepo: true, invalidSourceCluster: false, invalidOptions: false, expectedClusterCondition: nil, }, }, { - desc: "invalid option: repo", + desc: "invalid option: --repo=", + dataSource: &v1beta1.DataSource{PostgresCluster: &v1beta1.PostgresClusterDataSource{ + ClusterName: "invalid-repo-option-equals", RepoName: "repo1", + Options: []string{"--repo="}, + }}, + clusterBootstrapped: false, + sourceClusterName: "invalid-repo-option-equals", + sourceClusterRepos: []v1beta1.PGBackRestRepo{{Name: "repo1"}}, + result: testResult{ + configCount: 1, jobCount: 0, pvcCount: 1, + invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: true, + expectedClusterCondition: nil, + }, + }, { + desc: "invalid option: --repo ", dataSource: &v1beta1.DataSource{PostgresCluster: &v1beta1.PostgresClusterDataSource{ - ClusterName: "invalid-repo-option", RepoName: "repo1", - Options: []string{"--repo"}, + ClusterName: "invalid-repo-option-space", RepoName: "repo1", + Options: []string{"--repo "}, }}, clusterBootstrapped: false, - sourceClusterName: "invalid-repo-option", + sourceClusterName: "invalid-repo-option-space", sourceClusterRepos: []v1beta1.PGBackRestRepo{{Name: "repo1"}}, result: testResult{ - jobCount: 0, pvcCount: 1, + configCount: 1, jobCount: 0, pvcCount: 1, invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: true, expectedClusterCondition: nil, }, @@ -2017,7 +1826,7 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { sourceClusterName: "invalid-stanza-option", sourceClusterRepos: []v1beta1.PGBackRestRepo{{Name: "repo1"}}, result: testResult{ - jobCount: 0, pvcCount: 1, + configCount: 1, jobCount: 0, pvcCount: 1, invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: true, expectedClusterCondition: nil, }, @@ -2031,7 +1840,7 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { sourceClusterName: "invalid-pgpath-option", sourceClusterRepos: []v1beta1.PGBackRestRepo{{Name: "repo1"}}, result: testResult{ - jobCount: 0, pvcCount: 1, + configCount: 1, jobCount: 0, pvcCount: 1, invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: true, expectedClusterCondition: nil, }, @@ -2044,7 +1853,7 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { sourceClusterName: "init-cond-missing", sourceClusterRepos: []v1beta1.PGBackRestRepo{{Name: "repo1"}}, result: testResult{ - jobCount: 0, pvcCount: 0, + configCount: 0, jobCount: 0, pvcCount: 0, invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: false, expectedClusterCondition: &metav1.Condition{ Type: ConditionPostgresDataInitialized, @@ -2062,7 +1871,7 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { sourceClusterName: "invalid-hash", sourceClusterRepos: []v1beta1.PGBackRestRepo{{Name: "repo1"}}, result: testResult{ - jobCount: 0, pvcCount: 0, + configCount: 0, jobCount: 0, pvcCount: 0, invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: false, expectedClusterCondition: nil, }, @@ -2080,115 +1889,843 
@@ func TestReconcilePostgresClusterDataSource(t *testing.T) { } clusterUID := "hippouid" + strconv.Itoa(i) - cluster := fakePostgresCluster(clusterName, namespace, clusterUID, dedicated) - cluster.Spec.DataSource = tc.dataSource - assert.NilError(t, tClient.Create(ctx, cluster)) - if tc.clusterBootstrapped { - cluster.Status.Patroni = &v1beta1.PatroniStatus{ - SystemIdentifier: "123456789", - } - } - cluster.Status.StartupInstance = "testinstance" - cluster.Status.StartupInstanceSet = "instance1" - assert.NilError(t, tClient.Status().Update(ctx, cluster)) - if !dedicated { - tc.sourceClusterName = tc.sourceClusterName + "-no-repo" - } - sourceCluster := fakePostgresCluster(tc.sourceClusterName, namespace, - "source"+clusterUID, dedicated) - sourceCluster.Spec.Backups.PGBackRest.Repos = tc.sourceClusterRepos - assert.NilError(t, tClient.Create(ctx, sourceCluster)) + cluster := fakePostgresCluster(clusterName, namespace, clusterUID, dedicated) + cluster.Spec.DataSource = tc.dataSource + assert.NilError(t, tClient.Create(ctx, cluster)) + if tc.clusterBootstrapped { + cluster.Status.Patroni = v1beta1.PatroniStatus{ + SystemIdentifier: "123456789", + } + } + cluster.Status.StartupInstance = "testinstance" + cluster.Status.StartupInstanceSet = "instance1" + assert.NilError(t, tClient.Status().Update(ctx, cluster)) + if !dedicated { + tc.sourceClusterName = tc.sourceClusterName + "-no-repo" + } + sourceCluster := fakePostgresCluster(tc.sourceClusterName, namespace, + "source"+clusterUID, dedicated) + sourceCluster.Spec.Backups.PGBackRest.Repos = tc.sourceClusterRepos + assert.NilError(t, tClient.Create(ctx, sourceCluster)) + + sourceClusterConfig := &corev1.ConfigMap{ + ObjectMeta: naming.PGBackRestConfig(sourceCluster), + Data: map[string]string{ + "pgbackrest_instance.conf": "source-stuff", + }, + } + assert.NilError(t, tClient.Create(ctx, sourceClusterConfig)) + + sourceClusterPrimary := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "primary-" + tc.sourceClusterName, + Namespace: namespace, + Labels: map[string]string{ + naming.LabelCluster: tc.sourceClusterName, + naming.LabelInstanceSet: "test", + naming.LabelInstance: "test-abcd", + naming.LabelRole: naming.RolePatroniLeader, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "test", + Image: "test", + Command: []string{"test"}, + }}, + }, + } + assert.NilError(t, tClient.Create(ctx, sourceClusterPrimary)) + + var pgclusterDataSource *v1beta1.PostgresClusterDataSource + if tc.dataSource != nil { + pgclusterDataSource = tc.dataSource.PostgresCluster + } + err := r.reconcilePostgresClusterDataSource(ctx, cluster, pgclusterDataSource, + "testhash", nil, rootCA, true) + assert.NilError(t, err) + + restoreConfig := &corev1.ConfigMap{} + err = tClient.Get(ctx, + naming.AsObjectKey(naming.PGBackRestConfig(cluster)), restoreConfig) + + if tc.result.configCount == 0 { + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %#v", err) + } else { + assert.NilError(t, err) + assert.DeepEqual(t, restoreConfig.Data, sourceClusterConfig.Data) + } + + restoreJobs := &batchv1.JobList{} + assert.NilError(t, tClient.List(ctx, restoreJobs, &client.ListOptions{ + LabelSelector: naming.PGBackRestRestoreJobSelector(clusterName), + Namespace: cluster.Namespace, + })) + assert.Assert(t, tc.result.jobCount == len(restoreJobs.Items)) + if len(restoreJobs.Items) == 1 { + assert.Assert(t, restoreJobs.Items[0].Labels[naming.LabelStartupInstance] != "") + assert.Assert(t, 
restoreJobs.Items[0].Annotations[naming.PGBackRestConfigHash] != "") + } + + dataPVCs := &corev1.PersistentVolumeClaimList{} + selector, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + dataRoleReq, err := labels.NewRequirement(naming.LabelRole, selection.Equals, + []string{naming.RolePostgresData}) + assert.NilError(t, err) + selector.Add(*dataRoleReq) + assert.NilError(t, tClient.List(ctx, dataPVCs, &client.ListOptions{ + LabelSelector: selector, + Namespace: cluster.Namespace, + })) + + assert.Assert(t, tc.result.pvcCount == len(dataPVCs.Items)) + + if tc.result.expectedClusterCondition != nil { + condition := meta.FindStatusCondition(cluster.Status.Conditions, + tc.result.expectedClusterCondition.Type) + if assert.Check(t, condition != nil) { + assert.Equal(t, tc.result.expectedClusterCondition.Status, condition.Status) + assert.Equal(t, tc.result.expectedClusterCondition.Reason, condition.Reason) + assert.Equal(t, tc.result.expectedClusterCondition.Message, condition.Message) + } + } + + if tc.result.invalidSourceCluster || tc.result.invalidSourceRepo || + tc.result.invalidOptions { + assert.Check(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": namespace, + "reason": "InvalidDataSource", + }) + return len(events.Items) == 1, err + })) + } + }) + } + } +} + +func TestReconcileCloudBasedDataSource(t *testing.T) { + cfg, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 4) + + r := &Reconciler{} + ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { + r = &Reconciler{ + Client: tClient, + Recorder: mgr.GetEventRecorderFor(ControllerName), + Tracer: otel.Tracer(ControllerName), + Owner: ControllerName, + } + }) + t.Cleanup(func() { teardownManager(cancel, t) }) + + namespace := setupNamespace(t, tClient).Name + + type testResult struct { + configCount, jobCount, pvcCount int + conf string + expectedClusterCondition *metav1.Condition + } + + for _, dedicated := range []bool{true, false} { + testCases := []struct { + desc string + dataSource *v1beta1.DataSource + clusterBootstrapped bool + result testResult + }{{ + desc: "initial reconcile", + dataSource: &v1beta1.DataSource{PGBackRest: &v1beta1.PGBackRestDataSource{ + Stanza: "db", + Repo: v1beta1.PGBackRestRepo{ + Name: "repo1", + }, + }}, + clusterBootstrapped: false, + result: testResult{ + configCount: 1, jobCount: 1, pvcCount: 1, + expectedClusterCondition: nil, + conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n archive-async = y\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n spool-path = /pgdata/pgbackrest-spool\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", + }, + }, { + desc: "global/configuration set", + dataSource: &v1beta1.DataSource{PGBackRest: &v1beta1.PGBackRestDataSource{ + Stanza: "db", + Repo: v1beta1.PGBackRestRepo{ + Name: "repo1", + }, + Global: map[string]string{ + "repo1-path": "elephant", + }, + }}, + clusterBootstrapped: false, + result: testResult{ + configCount: 1, jobCount: 1, pvcCount: 1, + expectedClusterCondition: nil, + conf: "|\n # Generated by postgres-operator. 
DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n archive-async = y\n log-path = /pgdata/pgbackrest/log\n repo1-path = elephant\n spool-path = /pgdata/pgbackrest-spool\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", + }, + }, { + desc: "invalid option: stanza", + dataSource: &v1beta1.DataSource{PGBackRest: &v1beta1.PGBackRestDataSource{ + Stanza: "db", + Repo: v1beta1.PGBackRestRepo{ + Name: "repo1", + }, + Options: []string{"--stanza"}, + }}, + clusterBootstrapped: false, + result: testResult{ + configCount: 1, jobCount: 0, pvcCount: 1, + expectedClusterCondition: nil, + conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n archive-async = y\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n spool-path = /pgdata/pgbackrest-spool\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", + }, + }, { + desc: "cluster bootstrapped init condition missing", + dataSource: &v1beta1.DataSource{PGBackRest: &v1beta1.PGBackRestDataSource{ + Stanza: "db", + Repo: v1beta1.PGBackRestRepo{ + Name: "repo1", + }, + }}, + clusterBootstrapped: true, + result: testResult{ + configCount: 0, jobCount: 0, pvcCount: 0, + expectedClusterCondition: &metav1.Condition{ + Type: ConditionPostgresDataInitialized, + Status: metav1.ConditionTrue, + Reason: "ClusterAlreadyBootstrapped", + Message: "The cluster is already bootstrapped", + }, + conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n archive-async = y\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n spool-path = /pgdata/pgbackrest-spool\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", + }, + }} + + for i, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + + clusterName := "hippocluster-" + strconv.Itoa(i) + if !dedicated { + clusterName = clusterName + "-no-repo" + } + clusterUID := "hippouid" + strconv.Itoa(i) + + cluster := fakePostgresCluster(clusterName, namespace, clusterUID, dedicated) + cluster.Spec.DataSource = tc.dataSource + assert.NilError(t, tClient.Create(ctx, cluster)) + if tc.clusterBootstrapped { + cluster.Status.Patroni = v1beta1.PatroniStatus{ + SystemIdentifier: "123456789", + } + } + cluster.Status.StartupInstance = "testinstance" + cluster.Status.StartupInstanceSet = "instance1" + assert.NilError(t, tClient.Status().Update(ctx, cluster)) + + var pgclusterDataSource *v1beta1.PGBackRestDataSource + if tc.dataSource != nil { + pgclusterDataSource = tc.dataSource.PGBackRest + } + err := r.reconcileCloudBasedDataSource(ctx, + cluster, + pgclusterDataSource, + "testhash", + nil, + ) + assert.NilError(t, err) + + restoreConfig := &corev1.ConfigMap{} + err = tClient.Get(ctx, + naming.AsObjectKey(naming.PGBackRestConfig(cluster)), restoreConfig) + + if tc.result.configCount == 0 { + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %#v", err) + } else { + assert.NilError(t, err) + assert.Assert(t, cmp.MarshalMatches(restoreConfig.Data["pgbackrest_instance.conf"], tc.result.conf)) + } + + restoreJobs := &batchv1.JobList{} + assert.NilError(t, tClient.List(ctx, restoreJobs, &client.ListOptions{ + LabelSelector: naming.PGBackRestRestoreJobSelector(clusterName), + Namespace: cluster.Namespace, + })) + assert.Assert(t, tc.result.jobCount == len(restoreJobs.Items)) + if len(restoreJobs.Items) == 1 { + assert.Assert(t, 
restoreJobs.Items[0].Labels[naming.LabelStartupInstance] != "") + assert.Assert(t, restoreJobs.Items[0].Annotations[naming.PGBackRestConfigHash] != "") + } + + dataPVCs := &corev1.PersistentVolumeClaimList{} + selector, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + dataRoleReq, err := labels.NewRequirement(naming.LabelRole, selection.Equals, + []string{naming.RolePostgresData}) + assert.NilError(t, err) + selector.Add(*dataRoleReq) + assert.NilError(t, tClient.List(ctx, dataPVCs, &client.ListOptions{ + LabelSelector: selector, + Namespace: cluster.Namespace, + })) + + assert.Assert(t, tc.result.pvcCount == len(dataPVCs.Items)) + + if tc.result.expectedClusterCondition != nil { + condition := meta.FindStatusCondition(cluster.Status.Conditions, + tc.result.expectedClusterCondition.Type) + if assert.Check(t, condition != nil) { + assert.Equal(t, tc.result.expectedClusterCondition.Status, condition.Status) + assert.Equal(t, tc.result.expectedClusterCondition.Reason, condition.Reason) + assert.Equal(t, tc.result.expectedClusterCondition.Message, condition.Message) + } + } + }) + } + } +} + +func TestCopyConfigurationResources(t *testing.T) { + _, tClient := setupKubernetes(t) + ctx := context.Background() + require.ParallelCapacity(t, 2) + + r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + + ns1 := setupNamespace(t, tClient) + ns2 := setupNamespace(t, tClient) + + secret := func(testNum string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "source-secret" + testNum, + Namespace: ns1.Name, + }, + } + } + + configMap := func(testNum string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "source-configmap" + testNum, + Namespace: ns1.Name, + }, + } + } + + clusterUID := "hippouid" + + sourceCluster := func(testNum string) *v1beta1.PostgresCluster { + return &v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "source-cluster" + testNum, + Namespace: ns1.Name, + UID: types.UID(clusterUID), + }, + Spec: v1beta1.PostgresClusterSpec{ + PostgresVersion: 13, + Image: "example.com/crunchy-postgres-ha:test", + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + Name: "instance1", + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }}, + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Configuration: []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "source-secret" + testNum, + }, + }}, { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "source-configmap" + testNum, + }, + }}, + }, + Image: "example.com/crunchy-pgbackrest:test", + Repos: []v1beta1.PGBackRestRepo{{ + Name: "repo1", + }}, + }, + }, + }, + } + } + + cluster := func(testNum, scName, scNamespace string) *v1beta1.PostgresCluster { + return &v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-cluster" + testNum, + Namespace: ns2.Name, + UID: types.UID(clusterUID), + }, + Spec: v1beta1.PostgresClusterSpec{ + PostgresVersion: 13, + Image: "example.com/crunchy-postgres-ha:test", + DataSource: &v1beta1.DataSource{ + PostgresCluster: &v1beta1.PostgresClusterDataSource{ + ClusterName: scName, + ClusterNamespace: 
scNamespace, + RepoName: "repo1", + }, + }, + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + Name: "instance1", + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }}, + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Image: "example.com/crunchy-pgbackrest:test", + Repos: []v1beta1.PGBackRestRepo{{ + Name: "repo1", + }}, + }, + }, + }, + } + } + + checkSecret := func(secretName, nsName string) error { + secretCopy := &corev1.Secret{} + err := tClient.Get(ctx, types.NamespacedName{ + Name: secretName, + Namespace: nsName, + }, secretCopy) + return err + } + + checkConfigMap := func(configMapName, nsName string) error { + configMapCopy := &corev1.ConfigMap{} + err := tClient.Get(ctx, types.NamespacedName{ + Name: configMapName, + Namespace: nsName, + }, configMapCopy) + return err + } + + t.Run("No Secret or ConfigMap", func(t *testing.T) { + sc := sourceCluster("0") + + assert.Check(t, apierrors.IsNotFound( + r.copyConfigurationResources(ctx, cluster("0", sc.Name, sc.Namespace), sc))) + }) + t.Run("Only Secret", func(t *testing.T) { + secret := secret("1") + if err := tClient.Create(ctx, secret); err != nil { + t.Fatal(err) + } + assert.NilError(t, checkSecret(secret.Name, ns1.Name)) + + sc := sourceCluster("1") + + assert.Check(t, apierrors.IsNotFound( + r.copyConfigurationResources(ctx, cluster("1", sc.Name, sc.Namespace), sc))) + }) + t.Run("Only ConfigMap", func(t *testing.T) { + configMap := configMap("2") + if err := tClient.Create(ctx, configMap); err != nil { + t.Fatal(err) + } + assert.NilError(t, checkConfigMap(configMap.Name, ns1.Name)) + + sc := sourceCluster("2") + + assert.Check(t, apierrors.IsNotFound( + r.copyConfigurationResources(ctx, cluster("2", sc.Name, sc.Namespace), sc))) + }) + t.Run("Secret and ConfigMap, neither optional", func(t *testing.T) { + secret := secret("3") + if err := tClient.Create(ctx, secret); err != nil { + t.Fatal(err) + } + assert.NilError(t, checkSecret(secret.Name, ns1.Name)) + + configMap := configMap("3") + if err := tClient.Create(ctx, configMap); err != nil { + t.Fatal(err) + } + assert.NilError(t, checkConfigMap(configMap.Name, ns1.Name)) + + sc := sourceCluster("3") + nc := cluster("3", sc.Name, sc.Namespace) + if err := tClient.Create(ctx, nc); err != nil { + t.Fatal(err) + } + + assert.NilError(t, r.copyConfigurationResources(ctx, nc, sc)) + + assert.NilError(t, checkSecret(secret.Name+"-restorecopy-0", ns2.Name)) + assert.NilError(t, checkConfigMap(configMap.Name+"-restorecopy-1", ns2.Name)) + }) + t.Run("Secret and ConfigMap configured, Secret missing but optional", func(t *testing.T) { + secret := secret("4") + configMap := configMap("4") + if err := tClient.Create(ctx, configMap); err != nil { + t.Fatal(err) + } + assert.NilError(t, checkConfigMap(configMap.Name, ns1.Name)) + + sc := sourceCluster("4") + sc.Spec.Backups.PGBackRest.Configuration[0].Secret.Optional = initialize.Bool(true) + + nc := cluster("4", sc.Name, sc.Namespace) + if err := tClient.Create(ctx, nc); err != nil { + t.Fatal(err) + } + + assert.NilError(t, r.copyConfigurationResources(ctx, nc, sc)) + + assert.Check(t, apierrors.IsNotFound(checkSecret(secret.Name+"-restorecopy-0", ns2.Name))) + assert.NilError(t, checkConfigMap(configMap.Name+"-restorecopy-1", ns2.Name)) + }) + t.Run("Secret and ConfigMap configured, 
ConfigMap missing but optional", func(t *testing.T) { + secret := secret("5") + configMap := configMap("5") + if err := tClient.Create(ctx, secret); err != nil { + t.Fatal(err) + } + assert.NilError(t, checkSecret(secret.Name, ns1.Name)) + + sc := sourceCluster("5") + sc.Spec.Backups.PGBackRest.Configuration[1].ConfigMap.Optional = initialize.Bool(true) + + nc := cluster("5", sc.Name, sc.Namespace) + if err := tClient.Create(ctx, nc); err != nil { + t.Fatal(err) + } + + assert.NilError(t, r.copyConfigurationResources(ctx, nc, sc)) + + assert.NilError(t, checkSecret(secret.Name+"-restorecopy-0", ns2.Name)) + assert.Check(t, apierrors.IsNotFound(checkConfigMap(configMap.Name+"-restorecopy-1", ns2.Name))) + }) + t.Run("Secret and ConfigMap configured, both optional", func(t *testing.T) { + secret := secret("6") + configMap := configMap("6") + sc := sourceCluster("6") + sc.Spec.Backups.PGBackRest.Configuration[0].Secret.Optional = initialize.Bool(true) + sc.Spec.Backups.PGBackRest.Configuration[1].ConfigMap.Optional = initialize.Bool(true) + + nc := cluster("6", sc.Name, sc.Namespace) + if err := tClient.Create(ctx, nc); err != nil { + t.Fatal(err) + } + + assert.NilError(t, r.copyConfigurationResources(ctx, nc, sc)) + + assert.Assert(t, apierrors.IsNotFound(checkSecret(secret.Name+"-restorecopy-0", ns2.Name))) + assert.Assert(t, apierrors.IsNotFound(checkConfigMap(configMap.Name+"-restorecopy-1", ns2.Name))) + }) +} + +func TestGenerateBackupJobIntent(t *testing.T) { + ctx := context.Background() + t.Run("empty", func(t *testing.T) { + spec := generateBackupJobSpecIntent(ctx, + &v1beta1.PostgresCluster{}, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - /opt/crunchy/bin/pgbackrest + env: + - name: COMMAND + value: backup + - name: COMMAND_OPTS + value: --stanza=db --repo= + - name: COMPARE_HASH + value: "true" + - name: CONTAINER + value: pgbackrest + - name: NAMESPACE + - name: SELECTOR + value: postgres-operator.crunchydata.com/cluster=,postgres-operator.crunchydata.com/pgbackrest=,postgres-operator.crunchydata.com/pgbackrest-dedicated= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_repo.conf + path: pgbackrest_repo.conf + - key: config-hash + path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf + name: -pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: -pgbackrest + `)) + }) + + t.Run("ImagePullPolicy", func(t *testing.T) { + cluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + ImagePullPolicy: corev1.PullAlways, + }, + } + job := generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + assert.Equal(t, job.Template.Spec.Containers[0].ImagePullPolicy, corev1.PullAlways) + }) + + t.Run("Resources", func(t 
*testing.T) { + cluster := &v1beta1.PostgresCluster{} + + t.Run("Resources not defined in jobs", func(t *testing.T) { + cluster.Spec.Backups = v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{}, + } + job := generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + assert.DeepEqual(t, job.Template.Spec.Containers[0].Resources, + corev1.ResourceRequirements{}) + }) + + t.Run("Resources defined", func(t *testing.T) { + cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1m"), + }, + }, + } + job := generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + assert.DeepEqual(t, job.Template.Spec.Containers[0].Resources, + corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1m"), + }}, + ) + }) + }) + + t.Run("Affinity", func(t *testing.T) { + affinity := &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{{ + MatchExpressions: []corev1.NodeSelectorRequirement{{ + Key: "key", + Operator: "Exist", + }}, + }}, + }, + }, + } + + cluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Jobs: &v1beta1.BackupJobs{ + Affinity: affinity, + }, + }, + }, + }, + } + job := generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + assert.Equal(t, job.Template.Spec.Affinity, affinity) + }) + + t.Run("PriorityClassName", func(t *testing.T) { + cluster := &v1beta1.PostgresCluster{} + cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + PriorityClassName: initialize.String("some-priority-class"), + } + job := generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + assert.Equal(t, job.Template.Spec.PriorityClassName, "some-priority-class") + }) + + t.Run("Tolerations", func(t *testing.T) { + tolerations := []corev1.Toleration{{ + Key: "key", + Operator: "Exist", + }} + + cluster := &v1beta1.PostgresCluster{} + cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + Tolerations: tolerations, + } + job := generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + assert.DeepEqual(t, job.Template.Spec.Tolerations, tolerations) + }) + + t.Run("TTLSecondsAfterFinished", func(t *testing.T) { + cluster := &v1beta1.PostgresCluster{} + + t.Run("Undefined", func(t *testing.T) { + cluster.Spec.Backups.PGBackRest.Jobs = nil + + spec := generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, + ) + assert.Assert(t, spec.TTLSecondsAfterFinished == nil) + + cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{} + + spec = generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, + ) + assert.Assert(t, spec.TTLSecondsAfterFinished == nil) + }) + + t.Run("Zero", func(t *testing.T) { + cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + TTLSecondsAfterFinished: initialize.Int32(0), + } + + spec := generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, + ) + if assert.Check(t, spec.TTLSecondsAfterFinished != nil) { + assert.Equal(t, *spec.TTLSecondsAfterFinished, int32(0)) + } + }) + + t.Run("Positive", func(t *testing.T) { + 
cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + TTLSecondsAfterFinished: initialize.Int32(100), + } + + spec := generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, + ) + if assert.Check(t, spec.TTLSecondsAfterFinished != nil) { + assert.Equal(t, *spec.TTLSecondsAfterFinished, int32(100)) + } + }) + }) +} + +func TestGenerateRepoHostIntent(t *testing.T) { + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) - sourceClusterPrimary := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "primary-" + tc.sourceClusterName, - Namespace: namespace, - Labels: map[string]string{ - naming.LabelCluster: tc.sourceClusterName, - naming.LabelInstanceSet: "test", - naming.LabelInstance: "test-abcd", - naming.LabelRole: naming.RolePatroniLeader, - }, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{{ - Name: "test", - Image: "test", - Command: []string{"test"}, - }}, - }, - } - assert.NilError(t, tClient.Create(ctx, sourceClusterPrimary)) + ctx := context.Background() + r := Reconciler{Client: cc} - var pgclusterDataSource *v1beta1.PostgresClusterDataSource - if tc.dataSource != nil { - pgclusterDataSource = tc.dataSource.PostgresCluster - } - err := r.reconcilePostgresClusterDataSource(ctx, cluster, pgclusterDataSource, - "testhash", nil) - assert.NilError(t, err) + t.Run("empty", func(t *testing.T) { + _, err := r.generateRepoHostIntent(ctx, &v1beta1.PostgresCluster{}, "", &RepoResources{}, + &observedInstances{}) + assert.NilError(t, err) + }) - restoreJobs := &batchv1.JobList{} - assert.NilError(t, tClient.List(ctx, restoreJobs, &client.ListOptions{ - LabelSelector: naming.PGBackRestRestoreJobSelector(clusterName), - })) - assert.Assert(t, tc.result.jobCount == len(restoreJobs.Items)) - if len(restoreJobs.Items) == 1 { - assert.Assert(t, restoreJobs.Items[0].Labels[naming.LabelStartupInstance] != "") - assert.Assert(t, restoreJobs.Items[0].Annotations[naming.PGBackRestConfigHash] != "") - } + cluster := &v1beta1.PostgresCluster{} + sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, &observedInstances{}) + assert.NilError(t, err) - dataPVCs := &v1.PersistentVolumeClaimList{} - selector, err := naming.AsSelector(naming.Cluster(cluster.Name)) - assert.NilError(t, err) - dataRoleReq, err := labels.NewRequirement(naming.LabelRole, selection.Equals, - []string{naming.RolePostgresData}) - assert.NilError(t, err) - selector.Add(*dataRoleReq) - assert.NilError(t, tClient.List(ctx, dataPVCs, &client.ListOptions{ - LabelSelector: selector, - })) + t.Run("ServiceAccount", func(t *testing.T) { + assert.Equal(t, sts.Spec.Template.Spec.ServiceAccountName, "") + if assert.Check(t, sts.Spec.Template.Spec.AutomountServiceAccountToken != nil) { + assert.Equal(t, *sts.Spec.Template.Spec.AutomountServiceAccountToken, false) + } + }) - assert.Assert(t, tc.result.pvcCount == len(dataPVCs.Items)) + t.Run("Replicas", func(t *testing.T) { + assert.Equal(t, *sts.Spec.Replicas, int32(1)) + }) - if tc.result.expectedClusterCondition != nil { - condition := meta.FindStatusCondition(cluster.Status.Conditions, - tc.result.expectedClusterCondition.Type) - if assert.Check(t, condition != nil) { - assert.Equal(t, tc.result.expectedClusterCondition.Status, condition.Status) - assert.Equal(t, tc.result.expectedClusterCondition.Reason, condition.Reason) - assert.Equal(t, tc.result.expectedClusterCondition.Message, condition.Message) - } - } + t.Run("PG instances observed, do not shutdown repo host", func(t *testing.T) { + cluster := &v1beta1.PostgresCluster{ + 
Spec: v1beta1.PostgresClusterSpec{ + Shutdown: initialize.Bool(true), + }, + } + observed := &observedInstances{forCluster: []*Instance{{Pods: []*corev1.Pod{{}}}}} + sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, observed) + assert.NilError(t, err) + assert.Equal(t, *sts.Spec.Replicas, int32(1)) + }) - if tc.result.invalidSourceCluster || tc.result.invalidSourceRepo || - tc.result.invalidOptions { - events := &corev1.EventList{} - if err := wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": namespace, - "reason": "InvalidDataSource", - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }); err != nil { - t.Error(err) - } - } - }) + t.Run("No PG instances observed, shutdown repo host", func(t *testing.T) { + cluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Shutdown: initialize.Bool(true), + }, } - } + observed := &observedInstances{forCluster: []*Instance{{}}} + sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, observed) + assert.NilError(t, err) + assert.Equal(t, *sts.Spec.Replicas, int32(0)) + }) } func TestGenerateRestoreJobIntent(t *testing.T) { - env, cc, _ := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) r := Reconciler{ Client: cc, @@ -2234,8 +2771,12 @@ func TestGenerateRestoreJobIntent(t *testing.T) { Key: "key", Operator: "Exist", }}, + PriorityClassName: initialize.String("some-priority-class"), } cluster := &v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, Spec: v1beta1.PostgresClusterSpec{ Metadata: &v1beta1.Metadata{ Labels: map[string]string{"Global": "test"}, @@ -2249,6 +2790,7 @@ func TestGenerateRestoreJobIntent(t *testing.T) { }}, Image: "image", ImagePullSecrets: []corev1.LocalObjectReference{{Name: "Secret"}}, + ImagePullPolicy: corev1.PullAlways, }, } @@ -2311,6 +2853,8 @@ func TestGenerateRestoreJobIntent(t *testing.T) { t.Run("Image", func(t *testing.T) { assert.Equal(t, job.Spec.Template.Spec.Containers[0].Image, "image") + assert.Equal(t, job.Spec.Template.Spec.Containers[0].ImagePullPolicy, + corev1.PullAlways) }) t.Run("Name", func(t *testing.T) { assert.Equal(t, job.Spec.Template.Spec.Containers[0].Name, @@ -2353,6 +2897,10 @@ func TestGenerateRestoreJobIntent(t *testing.T) { assert.DeepEqual(t, job.Spec.Template.Spec.Tolerations, dataSource.Tolerations) }) + t.Run("Pod Priority Class", func(t *testing.T) { + assert.DeepEqual(t, job.Spec.Template.Spec.PriorityClassName, + "some-priority-class") + }) t.Run("ImagePullSecret", func(t *testing.T) { assert.DeepEqual(t, job.Spec.Template.Spec.ImagePullSecrets, []corev1.LocalObjectReference{{ @@ -2362,6 +2910,17 @@ func TestGenerateRestoreJobIntent(t *testing.T) { t.Run("PodSecurityContext", func(t *testing.T) { assert.Assert(t, job.Spec.Template.Spec.SecurityContext != nil) }) + t.Run("EnableServiceLinks", func(t *testing.T) { + if assert.Check(t, job.Spec.Template.Spec.EnableServiceLinks != nil) { + assert.Equal(t, *job.Spec.Template.Spec.EnableServiceLinks, false) + } + }) + t.Run("ServiceAccount", func(t *testing.T) { + assert.Equal(t, job.Spec.Template.Spec.ServiceAccountName, "test-instance") + if assert.Check(t, 
job.Spec.Template.Spec.AutomountServiceAccountToken != nil) { + assert.Equal(t, *job.Spec.Template.Spec.AutomountServiceAccountToken, false) + } + }) }) }) }) @@ -2370,26 +2929,12 @@ func TestGenerateRestoreJobIntent(t *testing.T) { } func TestObserveRestoreEnv(t *testing.T) { + ctx := context.Background() + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 1) - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) - r := &Reconciler{} - ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { - r = &Reconciler{ - Client: tClient, - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(ControllerName), - Owner: ControllerName, - } - }) - t.Cleanup(func() { teardownManager(cancel, t) }) - - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) - namespace := ns.Name + r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + namespace := setupNamespace(t, tClient).Name generateJob := func(clusterName string, completed, failed *bool) *batchv1.Job { @@ -2407,14 +2952,14 @@ func TestObserveRestoreEnv(t *testing.T) { restoreJob := &batchv1.Job{ ObjectMeta: meta, Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ + Template: corev1.PodTemplateSpec{ ObjectMeta: meta, - Spec: v1.PodSpec{ - Containers: []v1.Container{{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ Image: "test", Name: naming.PGBackRestRestoreContainerName, }}, - RestartPolicy: v1.RestartPolicyNever, + RestartPolicy: corev1.RestartPolicyNever, }, }, }, @@ -2471,15 +3016,15 @@ func TestObserveRestoreEnv(t *testing.T) { }{{ desc: "restore job and all patroni endpoints exist", createResources: func(t *testing.T, cluster *v1beta1.PostgresCluster) { - fakeLeaderEP := &v1.Endpoints{} + fakeLeaderEP := &corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) fakeLeaderEP.ObjectMeta.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeLeaderEP)) - fakeDCSEP := &v1.Endpoints{} + fakeDCSEP := &corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) fakeDCSEP.ObjectMeta.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeDCSEP)) - fakeFailoverEP := &v1.Endpoints{} + fakeFailoverEP := &corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) fakeFailoverEP.ObjectMeta.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeFailoverEP)) @@ -2495,15 +3040,15 @@ func TestObserveRestoreEnv(t *testing.T) { }, { desc: "patroni endpoints only exist", createResources: func(t *testing.T, cluster *v1beta1.PostgresCluster) { - fakeLeaderEP := &v1.Endpoints{} + fakeLeaderEP := &corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) fakeLeaderEP.ObjectMeta.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeLeaderEP)) - fakeDCSEP := &v1.Endpoints{} + fakeDCSEP := &corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) fakeDCSEP.ObjectMeta.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeDCSEP)) - fakeFailoverEP := &v1.Endpoints{} + fakeFailoverEP := &corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) fakeFailoverEP.ObjectMeta.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeFailoverEP)) @@ -2598,26 
+3143,12 @@ func TestObserveRestoreEnv(t *testing.T) { } func TestPrepareForRestore(t *testing.T) { + ctx := context.Background() + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 1) - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) - r := &Reconciler{} - ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { - r = &Reconciler{ - Client: tClient, - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(ControllerName), - Owner: ControllerName, - } - }) - t.Cleanup(func() { teardownManager(cancel, t) }) - - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) - namespace := ns.Name + r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + namespace := setupNamespace(t, tClient).Name generateJob := func(clusterName string) *batchv1.Job { @@ -2635,14 +3166,14 @@ func TestPrepareForRestore(t *testing.T) { restoreJob := &batchv1.Job{ ObjectMeta: meta, Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ + Template: corev1.PodTemplateSpec{ ObjectMeta: meta, - Spec: v1.PodSpec{ - Containers: []v1.Container{{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ Image: "test", Name: naming.PGBackRestRestoreContainerName, }}, - RestartPolicy: v1.RestartPolicyNever, + RestartPolicy: corev1.RestartPolicyNever, }, }, }, @@ -2687,15 +3218,15 @@ func TestPrepareForRestore(t *testing.T) { desc: "remove patroni endpoints", createResources: func(t *testing.T, cluster *v1beta1.PostgresCluster) (*batchv1.Job, []corev1.Endpoints) { - fakeLeaderEP := v1.Endpoints{} + fakeLeaderEP := corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) fakeLeaderEP.ObjectMeta.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, &fakeLeaderEP)) - fakeDCSEP := v1.Endpoints{} + fakeDCSEP := corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) fakeDCSEP.ObjectMeta.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, &fakeDCSEP)) - fakeFailoverEP := v1.Endpoints{} + fakeFailoverEP := corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) fakeFailoverEP.ObjectMeta.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, &fakeFailoverEP)) @@ -2732,7 +3263,7 @@ func TestPrepareForRestore(t *testing.T) { fakeObserved: &observedInstances{forCluster: []*Instance{{ Name: primaryInstanceName, Spec: &v1beta1.PostgresInstanceSetSpec{Name: primaryInstanceSetName}, - Pods: []*v1.Pod{{ + Pods: []*corev1.Pod{{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, }, @@ -2767,7 +3298,7 @@ func TestPrepareForRestore(t *testing.T) { } clusterUID := clusterName cluster := fakePostgresCluster(clusterName, namespace, clusterUID, dedicated) - cluster.Status.Patroni = &v1beta1.PatroniStatus{SystemIdentifier: "abcde12345"} + cluster.Status.Patroni = v1beta1.PatroniStatus{SystemIdentifier: "abcde12345"} cluster.Status.Proxy.PGBouncer.PostgreSQLRevision = "abcde12345" cluster.Status.Monitoring.ExporterConfiguration = "abcde12345" meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ @@ -2803,8 +3334,8 @@ func TestPrepareForRestore(t *testing.T) { naming.GenerateStartupInstance(cluster, &cluster.Spec.InstanceSets[0]).Name) } - leaderEP, dcsEP, failoverEP := v1.Endpoints{}, 
v1.Endpoints{}, v1.Endpoints{} - currentEndpoints := []v1.Endpoints{} + leaderEP, dcsEP, failoverEP := corev1.Endpoints{}, corev1.Endpoints{}, corev1.Endpoints{} + currentEndpoints := []corev1.Endpoints{} if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)), &leaderEP); err != nil { assert.NilError(t, client.IgnoreNotFound(err)) @@ -2826,6 +3357,7 @@ func TestPrepareForRestore(t *testing.T) { restoreJobs := &batchv1.JobList{} assert.NilError(t, r.Client.List(ctx, restoreJobs, &client.ListOptions{ + Namespace: cluster.Namespace, LabelSelector: naming.PGBackRestRestoreJobSelector(cluster.GetName()), })) @@ -2841,7 +3373,7 @@ func TestPrepareForRestore(t *testing.T) { assert.Equal(t, tc.result.expectedClusterCondition.Message, condition.Message) } if tc.result.expectedClusterCondition.Reason == ReasonReadyForRestore { - assert.Assert(t, cluster.Status.Patroni == nil) + assert.Assert(t, cluster.Status.Patroni.SystemIdentifier == "") assert.Assert(t, cluster.Status.Proxy.PGBouncer.PostgreSQLRevision == "") assert.Assert(t, cluster.Status.Monitoring.ExporterConfiguration == "") assert.Assert(t, meta.FindStatusCondition(cluster.Status.Conditions, @@ -2854,11 +3386,11 @@ func TestPrepareForRestore(t *testing.T) { } func TestReconcileScheduledBackups(t *testing.T) { - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) + cfg, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 2) + r := &Reconciler{} - ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { + _, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(ControllerName), @@ -2868,11 +3400,7 @@ func TestReconcileScheduledBackups(t *testing.T) { }) t.Cleanup(func() { teardownManager(cancel, t) }) - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) - + ns := setupNamespace(t, tClient) sa := &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{Name: "hippo-sa"}, } @@ -2895,6 +3423,8 @@ func TestReconcileScheduledBackups(t *testing.T) { expectedEventReason string // the observed instances instances *observedInstances + // CronJobs exist + cronJobs bool }{ { testDesc: "should reconcile, no requeue", @@ -2903,12 +3433,26 @@ func TestReconcileScheduledBackups(t *testing.T) { ConditionReplicaCreate: metav1.ConditionTrue, }, status: &v1beta1.PostgresClusterStatus{ - Patroni: &v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + PGBackRest: &v1beta1.PGBackRestStatus{ + Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, + }, + expectReconcile: true, + expectRequeue: false, + }, { + testDesc: "should reconcile, no requeue, existing cronjob", + clusterConditions: map[string]metav1.ConditionStatus{ + ConditionRepoHostReady: metav1.ConditionTrue, + ConditionReplicaCreate: metav1.ConditionTrue, + }, + status: &v1beta1.PostgresClusterStatus{ + Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, PGBackRest: &v1beta1.PGBackRestStatus{ Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, }, expectReconcile: true, expectRequeue: false, + cronJobs: true, }, { testDesc: "cluster not bootstrapped, should not reconcile", status: &v1beta1.PostgresClusterStatus{ @@ -2921,7 +3465,7 @@ func 
TestReconcileScheduledBackups(t *testing.T) { testDesc: "no repo host ready condition, should not reconcile", dedicatedOnly: true, status: &v1beta1.PostgresClusterStatus{ - Patroni: &v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, PGBackRest: &v1beta1.PGBackRestStatus{ Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, }, @@ -2930,7 +3474,7 @@ func TestReconcileScheduledBackups(t *testing.T) { }, { testDesc: "no replica create condition, should not reconcile", status: &v1beta1.PostgresClusterStatus{ - Patroni: &v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, PGBackRest: &v1beta1.PGBackRestStatus{ Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, }, @@ -2940,7 +3484,7 @@ func TestReconcileScheduledBackups(t *testing.T) { testDesc: "false repo host ready condition, should not reconcile", dedicatedOnly: true, status: &v1beta1.PostgresClusterStatus{ - Patroni: &v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, PGBackRest: &v1beta1.PGBackRestStatus{ Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, }, @@ -2949,7 +3493,7 @@ func TestReconcileScheduledBackups(t *testing.T) { }, { testDesc: "false replica create condition, should not reconcile", status: &v1beta1.PostgresClusterStatus{ - Patroni: &v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, PGBackRest: &v1beta1.PGBackRestStatus{ Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, }, @@ -2962,7 +3506,7 @@ func TestReconcileScheduledBackups(t *testing.T) { ConditionReplicaCreate: metav1.ConditionTrue, }, status: &v1beta1.PostgresClusterStatus{ - Patroni: &v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, PGBackRest: &v1beta1.PGBackRestStatus{ Repos: []v1beta1.RepoStatus{}}, }, @@ -2991,44 +3535,65 @@ func TestReconcileScheduledBackups(t *testing.T) { ctx := context.Background() postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), "", dedicated) + assert.NilError(t, tClient.Create(ctx, postgresCluster)) postgresCluster.Status = *tc.status for condition, status := range tc.clusterConditions { meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ Type: condition, Reason: "testing", Status: status}) } - assert.NilError(t, tClient.Create(ctx, postgresCluster)) assert.NilError(t, tClient.Status().Update(ctx, postgresCluster)) var requeue bool - if tc.instances != nil { - requeue = r.reconcileScheduledBackups(ctx, postgresCluster, sa) + if tc.cronJobs { + existingCronJobs := []*batchv1.CronJob{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "existingcronjob-repo1-full", + Labels: map[string]string{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRestCronJob: "full", + naming.LabelPGBackRestRepo: "repo1", + }}, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "existingcronjob-repo1-incr", + Labels: map[string]string{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRestCronJob: "incr", + naming.LabelPGBackRestRepo: "repo1", + }}, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "existingcronjob-repo1-diff", + Labels: map[string]string{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRestCronJob: "diff", + naming.LabelPGBackRestRepo: "repo1", + }}, + }, + } + requeue = 
r.reconcileScheduledBackups(ctx, postgresCluster, sa, existingCronJobs) } else { - requeue = r.reconcileScheduledBackups(ctx, postgresCluster, sa) + requeue = r.reconcileScheduledBackups(ctx, postgresCluster, sa, fakeObservedCronJobs()) } - if !tc.expectReconcile && !tc.expectRequeue { // expect no reconcile, no requeue assert.Assert(t, !requeue) // if an event is expected, the check for it if tc.expectedEventReason != "" { - events := &corev1.EventList{} - err := wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": ns.GetName(), - "involvedObject.uid": string(postgresCluster.GetUID()), - "reason": tc.expectedEventReason, - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }) - assert.NilError(t, err) + assert.NilError(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.GetName(), + "involvedObject.uid": string(postgresCluster.GetUID()), + "reason": tc.expectedEventReason, + }) + return len(events.Items) == 1, err + })) } } else if !tc.expectReconcile && tc.expectRequeue { // expect requeue, no reconcile @@ -3043,17 +3608,26 @@ func TestReconcileScheduledBackups(t *testing.T) { for _, backupType := range backupTypes { - returnedCronJob := &batchv1beta1.CronJob{} + var cronJobName string + if tc.cronJobs { + cronJobName = "existingcronjob-repo1-" + backupType + } else { + cronJobName = postgresCluster.Name + "-repo1-" + backupType + } + + returnedCronJob := &batchv1.CronJob{} if err := tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-pgbackrest-repo1-" + backupType, + Name: cronJobName, Namespace: postgresCluster.GetNamespace(), }, returnedCronJob); err != nil { assert.NilError(t, err) } // check returned cronjob matches set spec - assert.Equal(t, returnedCronJob.Name, clusterName+"-pgbackrest-repo1-"+backupType) + assert.Equal(t, returnedCronJob.Name, cronJobName) assert.Equal(t, returnedCronJob.Spec.Schedule, testCronSchedule) + assert.Equal(t, returnedCronJob.Spec.ConcurrencyPolicy, batchv1.ForbidConcurrent) + assert.Equal(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.PriorityClassName, "some-priority-class") assert.Equal(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Name, "pgbackrest") assert.Assert(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].SecurityContext != &corev1.SecurityContext{}) @@ -3078,28 +3652,16 @@ func TestReconcileScheduledBackups(t *testing.T) { } func TestSetScheduledJobStatus(t *testing.T) { + ctx := context.Background() + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) - // setup the test environment and ensure a clean teardown - tEnv, tClient, cfg := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) - r := &Reconciler{} - ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { - r = &Reconciler{ - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(ControllerName), - Owner: ControllerName, - } - }) - t.Cleanup(func() { teardownManager(cancel, t) }) + r := 
&Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} clusterName := "hippocluster" clusterUID := "hippouid" - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) + ns := setupNamespace(t, tClient) t.Run("set scheduled backup status", func(t *testing.T) { // create a PostgresCluster to test with @@ -3174,3 +3736,167 @@ func TestSetScheduledJobStatus(t *testing.T) { assert.Assert(t, len(postgresCluster.Status.PGBackRest.ScheduledBackups) == 0) }) } + +func TestBackupsEnabled(t *testing.T) { + // Garbage collector cleans up test resources before the test completes + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("USE_EXISTING_CLUSTER: Test fails due to garbage collection") + } + + cfg, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 2) + + r := &Reconciler{} + ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { + r = &Reconciler{ + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor(ControllerName), + Tracer: otel.Tracer(ControllerName), + Owner: ControllerName, + } + }) + t.Cleanup(func() { teardownManager(cancel, t) }) + + t.Run("Cluster with backups, no sts can be reconciled", func(t *testing.T) { + clusterName := "hippocluster1" + clusterUID := "hippouid1" + + ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, backupsSpecFound) + assert.Assert(t, backupsReconciliationAllowed) + }) + + t.Run("Cluster with backups, sts can be reconciled", func(t *testing.T) { + clusterName := "hippocluster2" + clusterUID := "hippouid2" + + ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, + } + + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) + + _, err = r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, true) + assert.NilError(t, err) + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, backupsSpecFound) + assert.Assert(t, backupsReconciliationAllowed) + }) + + t.Run("Cluster with no backups, no sts can reconcile", func(t *testing.T) { + // create a PostgresCluster to test with + clusterName := "hippocluster3" + clusterUID := "hippouid3" + + ns := setupNamespace(t, tClient) + + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + postgresCluster.Spec.Backups = v1beta1.Backups{} + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, !backupsSpecFound) + assert.Assert(t, backupsReconciliationAllowed) + }) + + t.Run("Cluster with no backups, sts cannot be reconciled", func(t *testing.T) { + clusterName := "hippocluster4" + clusterUID := "hippouid4" + 
+ ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, + } + + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) + + _, err = r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, true) + assert.NilError(t, err) + + postgresCluster.Spec.Backups = v1beta1.Backups{} + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, !backupsSpecFound) + assert.Assert(t, !backupsReconciliationAllowed) + }) + + t.Run("Cluster with no backups, sts, annotation can be reconciled", func(t *testing.T) { + clusterName := "hippocluster5" + clusterUID := "hippouid5" + + ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, + } + + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) + + _, err = r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, true) + assert.NilError(t, err) + + postgresCluster.Spec.Backups = v1beta1.Backups{} + annotations := map[string]string{ + naming.AuthorizeBackupRemovalAnnotation: "true", + } + postgresCluster.Annotations = annotations + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, !backupsSpecFound) + assert.Assert(t, backupsReconciliationAllowed) + }) +} diff --git a/internal/controller/postgrescluster/pgbouncer.go b/internal/controller/postgrescluster/pgbouncer.go index 4328c6ef34..76207fac02 100644 --- a/internal/controller/postgrescluster/pgbouncer.go +++ b/internal/controller/postgrescluster/pgbouncer.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -23,6 +12,7 @@ import ( "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -58,14 +48,17 @@ func (r *Reconciler) reconcilePGBouncer( if err == nil { err = r.reconcilePGBouncerDeployment(ctx, cluster, primaryCertificate, configmap, secret) } + if err == nil { + err = r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) + } if err == nil { err = r.reconcilePGBouncerInPostgreSQL(ctx, cluster, instances, secret) } return err } -// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get -// +kubebuilder:rbac:groups="",resources=configmaps,verbs=create;delete;patch +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={get} +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={create,delete,patch} // reconcilePGBouncerConfigMap writes the ConfigMap for a PgBouncer Pod. func (r *Reconciler) reconcilePGBouncerConfigMap( @@ -108,7 +101,7 @@ func (r *Reconciler) reconcilePGBouncerConfigMap( return configmap, err } -// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list +// +kubebuilder:rbac:groups="",resources="pods",verbs={get,list} // reconcilePGBouncerInPostgreSQL writes the user and other objects needed by // PgBouncer inside of PostgreSQL. @@ -177,8 +170,8 @@ func (r *Reconciler) reconcilePGBouncerInPostgreSQL( if err == nil { ctx := logging.NewContext(ctx, logging.FromContext(ctx).WithValues("revision", revision)) - err = action(ctx, func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + err = action(ctx, func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { + return r.PodExec(ctx, pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) }) } if err == nil { @@ -188,8 +181,8 @@ func (r *Reconciler) reconcilePGBouncerInPostgreSQL( return err } -// +kubebuilder:rbac:groups="",resources=secrets,verbs=get -// +kubebuilder:rbac:groups="",resources=secrets,verbs=create;delete;patch +// +kubebuilder:rbac:groups="",resources="secrets",verbs={get} +// +kubebuilder:rbac:groups="",resources="secrets",verbs={create,delete,patch} // reconcilePGBouncerSecret writes the Secret for a PgBouncer Pod. 
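// Editor's note: the reconcilePGBouncerInPostgreSQL hunk above now threads a
// context.Context into the exec callback. A hedged sketch of the Reconciler.PodExec
// field signature that this call site implies (the actual declaration lives outside
// this diff):
//
//	PodExec func(
//		ctx context.Context, namespace, pod, container string,
//		stdin io.Reader, stdout, stderr io.Writer, command ...string,
//	) error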
func (r *Reconciler) reconcilePGBouncerSecret( @@ -259,7 +252,17 @@ func (r *Reconciler) generatePGBouncerService( cluster.Spec.Proxy.PGBouncer.Metadata.GetAnnotationsOrNil()) service.Labels = naming.Merge( cluster.Spec.Metadata.GetLabelsOrNil(), - cluster.Spec.Proxy.PGBouncer.Metadata.GetLabelsOrNil(), + cluster.Spec.Proxy.PGBouncer.Metadata.GetLabelsOrNil()) + + if spec := cluster.Spec.Proxy.PGBouncer.Service; spec != nil { + service.Annotations = naming.Merge(service.Annotations, + spec.Metadata.GetAnnotationsOrNil()) + service.Labels = naming.Merge(service.Labels, + spec.Metadata.GetLabelsOrNil()) + } + + // add our labels last so they aren't overwritten + service.Labels = naming.Merge(service.Labels, map[string]string{ naming.LabelCluster: cluster.Name, naming.LabelRole: naming.RolePGBouncer, @@ -272,21 +275,39 @@ func (r *Reconciler) generatePGBouncerService( naming.LabelCluster: cluster.Name, naming.LabelRole: naming.RolePGBouncer, } - if spec := cluster.Spec.Proxy.PGBouncer.Service; spec != nil { - service.Spec.Type = corev1.ServiceType(spec.Type) - } else { - service.Spec.Type = corev1.ServiceTypeClusterIP - } // The TargetPort must be the name (not the number) of the PgBouncer // ContainerPort. This name allows the port number to differ between Pods, // which can happen during a rolling update. - service.Spec.Ports = []corev1.ServicePort{{ + servicePort := corev1.ServicePort{ Name: naming.PortPGBouncer, Port: *cluster.Spec.Proxy.PGBouncer.Port, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromString(naming.PortPGBouncer), - }} + } + + if spec := cluster.Spec.Proxy.PGBouncer.Service; spec == nil { + service.Spec.Type = corev1.ServiceTypeClusterIP + } else { + service.Spec.Type = corev1.ServiceType(spec.Type) + if spec.NodePort != nil { + if service.Spec.Type == corev1.ServiceTypeClusterIP { + // The NodePort can only be set when the Service type is NodePort or + // LoadBalancer. However, due to a known issue prior to Kubernetes + // 1.20, we clear these errors during our apply. To preserve the + // appropriate behavior, we log an Event and return an error. + // TODO(tjmoore4): Once Validation Rules are available, this check + // and event could potentially be removed in favor of that validation + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "MisconfiguredClusterIP", + "NodePort cannot be set with type ClusterIP on Service %q", service.Name) + return nil, true, fmt.Errorf("NodePort cannot be set with type ClusterIP on Service %q", service.Name) + } + servicePort.NodePort = *spec.NodePort + } + service.Spec.ExternalTrafficPolicy = initialize.FromPointer(spec.ExternalTrafficPolicy) + service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy + } + service.Spec.Ports = []corev1.ServicePort{servicePort} err := errors.WithStack(r.setControllerReference(cluster, service)) @@ -319,65 +340,19 @@ func (r *Reconciler) reconcilePGBouncerService( return service, err } -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=create;delete;patch - -// reconcilePGBouncerDeployment writes the Deployment that runs PgBouncer. -func (r *Reconciler) reconcilePGBouncerDeployment( +// generatePGBouncerDeployment returns an appsv1.Deployment that runs PgBouncer pods. 
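// Editor's note: a hedged illustration of the service spec that the NodePort guard
// above accepts or rejects. The values mirror the typesAndPort test cases later in
// this patch; anything beyond them is assumption.
//
//	// Accepted: a NodePort (or LoadBalancer) Service may pin the node port.
//	cluster.Spec.Proxy.PGBouncer.Service = &v1beta1.ServiceSpec{
//		Type:     "NodePort",
//		NodePort: initialize.Int32(32001),
//	}
//
//	// Rejected with a "MisconfiguredClusterIP" event: ClusterIP plus a NodePort.
//	cluster.Spec.Proxy.PGBouncer.Service = &v1beta1.ServiceSpec{
//		Type:     "ClusterIP",
//		NodePort: initialize.Int32(32000),
//	}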
+func (r *Reconciler) generatePGBouncerDeployment( ctx context.Context, cluster *v1beta1.PostgresCluster, primaryCertificate *corev1.SecretProjection, configmap *corev1.ConfigMap, secret *corev1.Secret, -) error { +) (*appsv1.Deployment, bool, error) { deploy := &appsv1.Deployment{ObjectMeta: naming.ClusterPGBouncer(cluster)} deploy.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("Deployment")) - // Set observations whether the deployment exists or not. - defer func() { - cluster.Status.Proxy.PGBouncer.Replicas = deploy.Status.Replicas - cluster.Status.Proxy.PGBouncer.ReadyReplicas = deploy.Status.ReadyReplicas - - // NOTE(cbandy): This should be somewhere else when there is more than - // one proxy implementation. - - var available *appsv1.DeploymentCondition - for i := range deploy.Status.Conditions { - if deploy.Status.Conditions[i].Type == appsv1.DeploymentAvailable { - available = &deploy.Status.Conditions[i] - } - } - - if available == nil { - // Avoid a panic! Fixed in Kubernetes v1.21.0 and controller-runtime v0.9.0-alpha.0. - // - https://issue.k8s.io/99714 - if len(cluster.Status.Conditions) > 0 { - meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.ProxyAvailable) - } - } else { - meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ - Type: v1beta1.ProxyAvailable, - Status: metav1.ConditionStatus(available.Status), - Reason: available.Reason, - Message: available.Message, - - LastTransitionTime: available.LastTransitionTime, - ObservedGeneration: cluster.Generation, - }) - } - }() - if cluster.Spec.Proxy == nil || cluster.Spec.Proxy.PGBouncer == nil { - // PgBouncer is disabled; delete the Deployment if it exists. Check the - // client cache first using Get. - key := client.ObjectKeyFromObject(deploy) - err := errors.WithStack(r.Client.Get(ctx, key, deploy)) - if err == nil { - err = errors.WithStack(r.deleteControlled(ctx, cluster, deploy)) - } - return client.IgnoreNotFound(err) + return deploy, false, nil } - err := errors.WithStack(r.setControllerReference(cluster, deploy)) - deploy.Annotations = naming.Merge( cluster.Spec.Metadata.GetAnnotationsOrNil(), cluster.Spec.Proxy.PGBouncer.Metadata.GetAnnotationsOrNil()) @@ -420,12 +395,24 @@ func (r *Reconciler) reconcilePGBouncerDeployment( // - https://docs.k8s.io/concepts/workloads/controllers/deployment/#rolling-update-deployment deploy.Spec.Strategy.Type = appsv1.RollingUpdateDeploymentStrategyType deploy.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{ - MaxUnavailable: intstr.ValueOrDefault(nil, intstr.FromInt(0)), + MaxUnavailable: initialize.Pointer(intstr.FromInt32(0)), } // Use scheduling constraints from the cluster spec. deploy.Spec.Template.Spec.Affinity = cluster.Spec.Proxy.PGBouncer.Affinity deploy.Spec.Template.Spec.Tolerations = cluster.Spec.Proxy.PGBouncer.Tolerations + deploy.Spec.Template.Spec.PriorityClassName = + initialize.FromPointer(cluster.Spec.Proxy.PGBouncer.PriorityClassName) + deploy.Spec.Template.Spec.TopologySpreadConstraints = + cluster.Spec.Proxy.PGBouncer.TopologySpreadConstraints + + // if default pod scheduling is not explicitly disabled, add the default + // pod topology spread constraints + if !initialize.FromPointer(cluster.Spec.DisableDefaultPodScheduling) { + deploy.Spec.Template.Spec.TopologySpreadConstraints = append( + deploy.Spec.Template.Spec.TopologySpreadConstraints, + defaultTopologySpreadConstraints(*deploy.Spec.Selector)...) + } // Restart containers any time they stop, die, are killed, etc. 
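// Editor's note: this hunk leans on small helpers from internal/initialize
// (Pointer, FromPointer, Bool, Int32, String). A hedged sketch of the two generic
// ones as used here; Pointer returns the address of a copy, FromPointer dereferences
// or falls back to the zero value:
//
//	func Pointer[T any](v T) *T { return &v }
//
//	func FromPointer[T any](p *T) T {
//		if p == nil {
//			var zero T
//			return zero
//		}
//		return *p
//	}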
// - https://docs.k8s.io/concepts/workloads/pods/pod-lifecycle/#restart-policy @@ -443,17 +430,138 @@ func (r *Reconciler) reconcilePGBouncerDeployment( // ServiceAccount and do not mount its credentials. deploy.Spec.Template.Spec.AutomountServiceAccountToken = initialize.Bool(false) - deploy.Spec.Template.Spec.SecurityContext = initialize.RestrictedPodSecurityContext() + // Do not add environment variables describing services in this namespace. + deploy.Spec.Template.Spec.EnableServiceLinks = initialize.Bool(false) + + deploy.Spec.Template.Spec.SecurityContext = initialize.PodSecurityContext() // set the image pull secrets, if any exist deploy.Spec.Template.Spec.ImagePullSecrets = cluster.Spec.ImagePullSecrets + err := errors.WithStack(r.setControllerReference(cluster, deploy)) + if err == nil { - pgbouncer.Pod(cluster, configmap, primaryCertificate, secret, &deploy.Spec.Template.Spec) + pgbouncer.Pod(ctx, cluster, configmap, primaryCertificate, secret, &deploy.Spec.Template.Spec) } + + return deploy, true, err +} + +// +kubebuilder:rbac:groups="apps",resources="deployments",verbs={get} +// +kubebuilder:rbac:groups="apps",resources="deployments",verbs={create,delete,patch} + +// reconcilePGBouncerDeployment writes the Deployment that runs PgBouncer. +func (r *Reconciler) reconcilePGBouncerDeployment( + ctx context.Context, cluster *v1beta1.PostgresCluster, + primaryCertificate *corev1.SecretProjection, + configmap *corev1.ConfigMap, secret *corev1.Secret, +) error { + deploy, specified, err := r.generatePGBouncerDeployment( + ctx, cluster, primaryCertificate, configmap, secret) + + // Set observations whether the deployment exists or not. + defer func() { + cluster.Status.Proxy.PGBouncer.Replicas = deploy.Status.Replicas + cluster.Status.Proxy.PGBouncer.ReadyReplicas = deploy.Status.ReadyReplicas + + // NOTE(cbandy): This should be somewhere else when there is more than + // one proxy implementation. + + var available *appsv1.DeploymentCondition + for i := range deploy.Status.Conditions { + if deploy.Status.Conditions[i].Type == appsv1.DeploymentAvailable { + available = &deploy.Status.Conditions[i] + } + } + + if available == nil { + meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.ProxyAvailable) + } else { + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: v1beta1.ProxyAvailable, + Status: metav1.ConditionStatus(available.Status), + Reason: available.Reason, + Message: available.Message, + + LastTransitionTime: available.LastTransitionTime, + ObservedGeneration: cluster.Generation, + }) + } + }() + + if err == nil && !specified { + // PgBouncer is disabled; delete the Deployment if it exists. Check the + // client cache first using Get. + key := client.ObjectKeyFromObject(deploy) + err := errors.WithStack(r.Client.Get(ctx, key, deploy)) + if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, deploy)) + } + return client.IgnoreNotFound(err) + } + if err == nil { err = errors.WithStack(r.apply(ctx, deploy)) } + return err +} + +// +kubebuilder:rbac:groups="policy",resources="poddisruptionbudgets",verbs={create,patch,get,delete} + +// reconcilePGBouncerPodDisruptionBudget creates a PDB for the PGBouncer deployment. +// A PDB will be created when minAvailable is determined to be greater than 0 and +// a PGBouncer proxy is defined in the spec. MinAvailable can be defined in the spec +// or a default value will be set based on the number of replicas defined for PGBouncer. 
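// Editor's note: getMinAvailable is a shared helper defined elsewhere in the package.
// Given the call below, which passes the spec value plus the PGBouncer replica count,
// a hedged sketch of its likely shape: an explicit value passes through unchanged,
// otherwise a default is derived from the replica count (one replica yields 0, more
// than one yields 1).
//
//	func getMinAvailable(minAvailable *intstr.IntOrString, replicas int32) *intstr.IntOrString {
//		if minAvailable != nil {
//			return minAvailable
//		}
//		if replicas > 1 {
//			return initialize.Pointer(intstr.FromInt32(1))
//		}
//		return initialize.Pointer(intstr.FromInt32(0))
//	}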
+func (r *Reconciler) reconcilePGBouncerPodDisruptionBudget( + ctx context.Context, + cluster *v1beta1.PostgresCluster, +) error { + deleteExistingPDB := func(cluster *v1beta1.PostgresCluster) error { + existing := &policyv1.PodDisruptionBudget{ObjectMeta: naming.ClusterPGBouncer(cluster)} + err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, existing)) + } + return client.IgnoreNotFound(err) + } + if cluster.Spec.Proxy == nil || cluster.Spec.Proxy.PGBouncer == nil { + return deleteExistingPDB(cluster) + } + + if cluster.Spec.Proxy.PGBouncer.Replicas == nil { + // Replicas should always have a value because of defaults in the spec + return errors.New("Replicas should be defined") + } + minAvailable := getMinAvailable(cluster.Spec.Proxy.PGBouncer.MinAvailable, + *cluster.Spec.Proxy.PGBouncer.Replicas) + + // If 'minAvailable' is set to '0', we will not reconcile the PDB. If one + // already exists, we will remove it. + scaled, err := intstr.GetScaledValueFromIntOrPercent(minAvailable, + int(*cluster.Spec.Proxy.PGBouncer.Replicas), true) + if err == nil && scaled <= 0 { + return deleteExistingPDB(cluster) + } + + meta := naming.ClusterPGBouncer(cluster) + meta.Labels = naming.Merge(cluster.Spec.Metadata.GetLabelsOrNil(), + cluster.Spec.Proxy.PGBouncer.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RolePGBouncer, + }) + meta.Annotations = naming.Merge(cluster.Spec.Metadata.GetAnnotationsOrNil(), + cluster.Spec.Proxy.PGBouncer.Metadata.GetAnnotationsOrNil()) + + selector := naming.ClusterPGBouncerSelector(cluster) + pdb := &policyv1.PodDisruptionBudget{} + if err == nil { + pdb, err = r.generatePodDisruptionBudget(cluster, meta, minAvailable, selector) + } + + if err == nil { + err = errors.WithStack(r.apply(ctx, pdb)) + } return err } diff --git a/internal/controller/postgrescluster/pgbouncer_test.go b/internal/controller/postgrescluster/pgbouncer_test.go index 1a73a079e6..9bbced5247 100644 --- a/internal/controller/postgrescluster/pgbouncer_test.go +++ b/internal/controller/postgrescluster/pgbouncer_test.go @@ -1,39 +1,39 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" + "strconv" "testing" + "github.com/pkg/errors" "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestGeneratePGBouncerService(t *testing.T) { - env, cc, _ := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{ + Client: cc, + Recorder: new(record.FakeRecorder), + } cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "ns5" @@ -50,7 +50,7 @@ func TestGeneratePGBouncerService(t *testing.T) { assert.NilError(t, err) assert.Assert(t, !specified) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null name: pg7-pgbouncer namespace: ns5 @@ -65,11 +65,11 @@ namespace: ns5 } alwaysExpect := func(t testing.TB, service *corev1.Service) { - assert.Assert(t, marshalMatches(service.TypeMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: pg7 @@ -84,12 +84,6 @@ ownerReferences: name: pg7 uid: "" `)) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` -- name: pgbouncer - port: 9651 - protocol: TCP - targetPort: pgbouncer - `)) // Always gets a ClusterIP (never None). assert.Equal(t, service.Spec.ClusterIP, "") @@ -127,6 +121,39 @@ ownerReferences: "postgres-operator.crunchydata.com/cluster": "pg7", "postgres-operator.crunchydata.com/role": "pgbouncer", }) + + // Add metadata to individual service + cluster.Spec.Proxy.PGBouncer.Service = &v1beta1.ServiceSpec{ + Metadata: &v1beta1.Metadata{ + Annotations: map[string]string{"c": "v3"}, + Labels: map[string]string{"d": "v4", + "postgres-operator.crunchydata.com/cluster": "wrongName"}, + }, + } + + service, specified, err = reconciler.generatePGBouncerService(cluster) + assert.NilError(t, err) + assert.Assert(t, specified) + + // Annotations present in the metadata. + assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + "a": "v1", + "c": "v3", + }) + + // Labels present in the metadata. + assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + "b": "v2", + "d": "v4", + "postgres-operator.crunchydata.com/cluster": "pg7", + "postgres-operator.crunchydata.com/role": "pgbouncer", + }) + + // Labels not in the selector. 
+ assert.DeepEqual(t, service.Spec.Selector, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "pg7", + "postgres-operator.crunchydata.com/role": "pgbouncer", + }) }) t.Run("NoServiceSpec", func(t *testing.T) { @@ -134,9 +161,14 @@ ownerReferences: assert.NilError(t, err) assert.Assert(t, specified) alwaysExpect(t, service) - // Defaults to ClusterIP. assert.Equal(t, service.Spec.Type, corev1.ServiceTypeClusterIP) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: pgbouncer + port: 9651 + protocol: TCP + targetPort: pgbouncer + `)) }) types := []struct { @@ -164,28 +196,77 @@ ownerReferences: assert.Assert(t, specified) alwaysExpect(t, service) test.Expect(t, service) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: pgbouncer + port: 9651 + protocol: TCP + targetPort: pgbouncer + `)) + }) + } + + typesAndPort := []struct { + Description string + Type string + NodePort *int32 + Expect func(testing.TB, *corev1.Service, error) + }{ + {Description: "ClusterIP with Port 32000", Type: "ClusterIP", + NodePort: initialize.Int32(32000), Expect: func(t testing.TB, service *corev1.Service, err error) { + assert.ErrorContains(t, err, "NodePort cannot be set with type ClusterIP on Service \"pg7-pgbouncer\"") + assert.Assert(t, service == nil) + }}, + {Description: "NodePort with Port 32001", Type: "NodePort", + NodePort: initialize.Int32(32001), Expect: func(t testing.TB, service *corev1.Service, err error) { + assert.NilError(t, err) + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeNodePort) + alwaysExpect(t, service) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: pgbouncer + nodePort: 32001 + port: 9651 + protocol: TCP + targetPort: pgbouncer +`)) + }}, + {Description: "LoadBalancer with Port 32002", Type: "LoadBalancer", + NodePort: initialize.Int32(32002), Expect: func(t testing.TB, service *corev1.Service, err error) { + assert.NilError(t, err) + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeLoadBalancer) + alwaysExpect(t, service) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: pgbouncer + nodePort: 32002 + port: 9651 + protocol: TCP + targetPort: pgbouncer +`)) + }}, + } + + for _, test := range typesAndPort { + t.Run(test.Type, func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Proxy.PGBouncer.Service = &v1beta1.ServiceSpec{Type: test.Type, NodePort: test.NodePort} + + service, specified, err := reconciler.generatePGBouncerService(cluster) + test.Expect(t, service, err) + // whether or not an error is encountered, 'specified' is true because + // the service *should* exist + assert.Assert(t, specified) }) } } func TestReconcilePGBouncerService(t *testing.T) { ctx := context.Background() - env, cc, _ := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) - - ns := &corev1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, ns)) }) + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} - cluster := &v1beta1.PostgresCluster{} - cluster.Namespace = ns.Name - cluster.Name = "pg2" - cluster.Spec.PostgresVersion = 12 - cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{}} - + cluster := testCluster() + cluster.Namespace = setupNamespace(t, cc).Name assert.NilError(t, cc.Create(ctx, cluster)) t.Run("Unspecified", func(t *testing.T) { @@ -234,10 +315,27 @@ func 
TestReconcilePGBouncerService(t *testing.T) { // CRD validation looks only at the new/incoming value of fields. Confirm // that each ServiceType can change to any other ServiceType. Forbidding // certain transitions requires a validating webhook. + serviceTypeChangeClusterCounter := 0 for _, beforeType := range serviceTypes { for _, changeType := range serviceTypes { t.Run(beforeType+"To"+changeType, func(t *testing.T) { - cluster := cluster.DeepCopy() + // Creating fresh clusters for these tests + clusterNamespace := cluster.Namespace + cluster := testCluster() + cluster.Namespace = clusterNamespace + + // Note (dsessler): Adding a number to each cluster name to make cluster/service + // names unique to work around an intermittent race condition where a service + // from a prior test has not been deleted yet when the next test runs, causing + // the test to fail due to non-matching IP addresses. + cluster.Name += "-" + strconv.Itoa(serviceTypeChangeClusterCounter) + assert.NilError(t, cc.Create(ctx, cluster)) + + cluster.Spec.Proxy = &v1beta1.PostgresProxySpec{ + PGBouncer: &v1beta1.PGBouncerPodSpec{ + Port: initialize.Int32(19041), + }, + } cluster.Spec.Proxy.PGBouncer.Service = &v1beta1.ServiceSpec{Type: beforeType} before, err := reconciler.reconcilePGBouncerService(ctx, cluster) @@ -247,10 +345,290 @@ func TestReconcilePGBouncerService(t *testing.T) { cluster.Spec.Proxy.PGBouncer.Service.Type = changeType after, err := reconciler.reconcilePGBouncerService(ctx, cluster) - assert.NilError(t, err) + + // LoadBalancers are provisioned by a separate controller that + // updates the Service soon after creation. The API may return + // a conflict error when we race to update it, even though we + // don't send a resourceVersion in our payload. Retry. + if apierrors.IsConflict(err) { + t.Log("conflict:", err) + after, err = reconciler.reconcilePGBouncerService(ctx, cluster) + } + + assert.NilError(t, err, "\n%#v", errors.Unwrap(err)) assert.Equal(t, after.Spec.ClusterIP, before.Spec.ClusterIP, "expected to keep the same ClusterIP") + serviceTypeChangeClusterCounter++ }) } } } + +func TestGeneratePGBouncerDeployment(t *testing.T) { + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ctx := context.Background() + reconciler := &Reconciler{Client: cc} + + cluster := &v1beta1.PostgresCluster{} + cluster.Namespace = "ns3" + cluster.Name = "test-cluster" + + t.Run("Unspecified", func(t *testing.T) { + for _, spec := range []*v1beta1.PostgresProxySpec{ + nil, new(v1beta1.PostgresProxySpec), + } { + cluster := cluster.DeepCopy() + cluster.Spec.Proxy = spec + + deploy, specified, err := reconciler.generatePGBouncerDeployment(ctx, cluster, nil, nil, nil) + assert.NilError(t, err) + assert.Assert(t, !specified) + + assert.Assert(t, cmp.MarshalMatches(deploy.ObjectMeta, ` +creationTimestamp: null +name: test-cluster-pgbouncer +namespace: ns3 + `)) + } + }) + + cluster.Spec.Proxy = &v1beta1.PostgresProxySpec{ + PGBouncer: &v1beta1.PGBouncerPodSpec{}, + } + cluster.Default() + + configmap := &corev1.ConfigMap{} + configmap.Name = "some-cm2" + + secret := &corev1.Secret{} + secret.Name = "some-secret3" + + primary := &corev1.SecretProjection{} + + t.Run("AnnotationsLabels", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Metadata = &v1beta1.Metadata{ + Annotations: map[string]string{"a": "v1"}, + Labels: map[string]string{"b": "v2"}, + } + + deploy, specified, err := reconciler.generatePGBouncerDeployment( + ctx, cluster, primary, configmap, secret) + assert.NilError(t, 
err) + assert.Assert(t, specified) + + // Annotations present in the metadata. + assert.DeepEqual(t, deploy.ObjectMeta.Annotations, map[string]string{ + "a": "v1", + }) + + // Labels present in the metadata. + assert.DeepEqual(t, deploy.ObjectMeta.Labels, map[string]string{ + "b": "v2", + "postgres-operator.crunchydata.com/cluster": "test-cluster", + "postgres-operator.crunchydata.com/role": "pgbouncer", + }) + + // Labels not in the pod selector. + assert.DeepEqual(t, deploy.Spec.Selector, + &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "test-cluster", + "postgres-operator.crunchydata.com/role": "pgbouncer", + }, + }) + + // Annotations present in the pod template. + assert.DeepEqual(t, deploy.Spec.Template.Annotations, map[string]string{ + "a": "v1", + }) + + // Labels present in the pod template. + assert.DeepEqual(t, deploy.Spec.Template.Labels, map[string]string{ + "b": "v2", + "postgres-operator.crunchydata.com/cluster": "test-cluster", + "postgres-operator.crunchydata.com/role": "pgbouncer", + }) + }) + + t.Run("PodSpec", func(t *testing.T) { + deploy, specified, err := reconciler.generatePGBouncerDeployment( + ctx, cluster, primary, configmap, secret) + assert.NilError(t, err) + assert.Assert(t, specified) + + // Containers and Volumes should be populated. + assert.Assert(t, len(deploy.Spec.Template.Spec.Containers) != 0) + assert.Assert(t, len(deploy.Spec.Template.Spec.Volumes) != 0) + + // Ignore Containers and Volumes in the comparison below. + deploy.Spec.Template.Spec.Containers = nil + deploy.Spec.Template.Spec.Volumes = nil + + // TODO(tjmoore4): Add additional tests to test appending existing + // topology spread constraints and spec.disableDefaultPodScheduling being + // set to true (as done in instance StatefulSet tests). 
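// The expected pod template that follows asserts the two default topology spread
// constraints PGO adds for PGBouncer: one per hostname and one per zone, both with
// maxSkew 1 and whenUnsatisfiable: ScheduleAnyway, selecting on the cluster and role
// labels. A minimal sketch of building equivalent constraints with the upstream
// Kubernetes API types (the operator's own helper is not shown in this diff):
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// defaultSpreadConstraints mirrors the YAML asserted below: spread pods across
// hostnames and zones on a best-effort basis, keyed on the given pod labels.
func defaultSpreadConstraints(selector map[string]string) []corev1.TopologySpreadConstraint {
	constraint := func(topologyKey string) corev1.TopologySpreadConstraint {
		return corev1.TopologySpreadConstraint{
			MaxSkew:           1,
			TopologyKey:       topologyKey,
			WhenUnsatisfiable: corev1.ScheduleAnyway,
			LabelSelector:     &metav1.LabelSelector{MatchLabels: selector},
		}
	}
	return []corev1.TopologySpreadConstraint{
		constraint("kubernetes.io/hostname"),
		constraint("topology.kubernetes.io/zone"),
	}
}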
+ + assert.Assert(t, cmp.MarshalMatches(deploy.Spec.Template.Spec, ` +automountServiceAccountToken: false +containers: null +enableServiceLinks: false +restartPolicy: Always +securityContext: + fsGroupChangePolicy: OnRootMismatch +shareProcessNamespace: true +topologySpreadConstraints: +- labelSelector: + matchLabels: + postgres-operator.crunchydata.com/cluster: test-cluster + postgres-operator.crunchydata.com/role: pgbouncer + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway +- labelSelector: + matchLabels: + postgres-operator.crunchydata.com/cluster: test-cluster + postgres-operator.crunchydata.com/role: pgbouncer + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + `)) + + t.Run("DisableDefaultPodScheduling", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.DisableDefaultPodScheduling = initialize.Bool(true) + + deploy, specified, err := reconciler.generatePGBouncerDeployment( + ctx, cluster, primary, configmap, secret) + assert.NilError(t, err) + assert.Assert(t, specified) + + assert.Assert(t, deploy.Spec.Template.Spec.TopologySpreadConstraints == nil) + }) + }) +} + +func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + + foundPDB := func( + cluster *v1beta1.PostgresCluster, + ) bool { + got := &policyv1.PodDisruptionBudget{} + err := r.Client.Get(ctx, + naming.AsObjectKey(naming.ClusterPGBouncer(cluster)), + got) + return !apierrors.IsNotFound(err) + } + + ns := setupNamespace(t, cc) + + t.Run("empty", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Proxy = nil + + assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) + }) + + t.Run("no replicas in spec", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Proxy.PGBouncer.Replicas = nil + assert.Error(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster), + "Replicas should be defined") + }) + + t.Run("not created", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromInt32(0)) + assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) + assert.Assert(t, !foundPDB(cluster)) + }) + + t.Run("int created", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) + + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) + assert.Assert(t, foundPDB(cluster)) + + t.Run("deleted", func(t *testing.T) { + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromInt32(0)) + err := r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) + if apierrors.IsConflict(err) { + // When running in an existing environment another controller will sometimes update + // the object. This leads to an error where the ResourceVersion of the object does + // not match what we expect. When we run into this conflict, try to reconcile the + // object again. 
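// The retry-once-on-conflict pattern above (and in the service type change tests
// earlier in this file) guards against other controllers updating the object between
// reconciles. A minimal sketch, assuming only client-go, of expressing the same idea
// with the upstream retry helper instead of a single manual retry:
package sketch

import "k8s.io/client-go/util/retry"

// reconcileWithConflictRetry retries the supplied reconcile closure with
// client-go's default backoff whenever the API server reports a conflict.
func reconcileWithConflictRetry(reconcile func() error) error {
	return retry.RetryOnConflict(retry.DefaultRetry, reconcile)
}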
+ err = r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) + } + assert.NilError(t, err, errors.Unwrap(err)) + assert.Assert(t, !foundPDB(cluster)) + }) + }) + + t.Run("str created", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("50%")) + + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) + assert.Assert(t, foundPDB(cluster)) + + t.Run("deleted", func(t *testing.T) { + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("0%")) + err := r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) + if apierrors.IsConflict(err) { + // When running in an existing environment another controller will sometimes update + // the object. This leads to an error where the ResourceVersion of the object does + // not match what we expect. When we run into this conflict, try to reconcile the + // object again. + err = r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) + } + assert.NilError(t, err, errors.Unwrap(err)) + assert.Assert(t, !foundPDB(cluster)) + }) + + t.Run("delete with 00%", func(t *testing.T) { + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("50%")) + + assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) + assert.Assert(t, foundPDB(cluster)) + + t.Run("deleted", func(t *testing.T) { + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("00%")) + err := r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) + if apierrors.IsConflict(err) { + // When running in an existing environment another controller will sometimes update + // the object. This leads to an error where the ResourceVersion of the object does + // not match what we expect. When we run into this conflict, try to reconcile the + // object again. + err = r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) + } + assert.NilError(t, err, errors.Unwrap(err)) + assert.Assert(t, !foundPDB(cluster)) + }) + }) + }) +} diff --git a/internal/controller/postgrescluster/pgmonitor.go b/internal/controller/postgrescluster/pgmonitor.go index 6bdeb1dc7b..e1b5186cb4 100644 --- a/internal/controller/postgrescluster/pgmonitor.go +++ b/internal/controller/postgrescluster/pgmonitor.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -19,8 +8,15 @@ import ( "context" "fmt" "io" + "os" + "strings" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" @@ -29,31 +25,6 @@ import ( pgpassword "github.com/crunchydata/postgres-operator/internal/postgres/password" "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - exporterPort = int32(9187) - - // TODO: With the current implementation of the crunchy-postgres-exporter - // it makes sense to hard-code the database. When moving away from the - // crunchy-postgres-exporter start.sh script we should re-evaluate always - // setting the exporter database to `postgres`. - exporterDB = "postgres" - - // The exporter connects to all databases over loopback using a password. - // Kubernetes guarantees localhost resolves to loopback: - // https://kubernetes.io/docs/concepts/cluster-administration/networking/ - // https://releases.k8s.io/v1.21.0/pkg/kubelet/kubelet_pods.go#L343 - exporterHost = "localhost" -) - -var ( - oneMillicore = resource.MustParse("1m") - oneMebibyte = resource.MustParse("1Mi") ) // If pgMonitor is enabled the pgMonitor sidecar(s) have been added to the @@ -69,12 +40,11 @@ func (r *Reconciler) reconcilePGMonitor(ctx context.Context, } // reconcilePGMonitorExporter performs setup the postgres_exporter sidecar -// - PodExec to get setup.sql file for the postgres version // - PodExec to run the sql in the primary database // Status.Monitoring.ExporterConfiguration is used to determine when the // pgMonitor postgres_exporter configuration should be added/changed to // limit how often PodExec is used -// - TODO jmckulk: kube perms comment? +// - TODO (jmckulk): kube perms comment? func (r *Reconciler) reconcilePGMonitorExporter(ctx context.Context, cluster *v1beta1.PostgresCluster, instances *observedInstances, monitoringSecret *corev1.Secret) error { @@ -83,45 +53,44 @@ func (r *Reconciler) reconcilePGMonitorExporter(ctx context.Context, writableInstance *Instance writablePod *corev1.Pod setup string + pgImageSHA string ) // Find the PostgreSQL instance that can execute SQL that writes to every // database. When there is none, return early. 
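// Later in this hunk the reconciler stops exec-ing into the exporter container for
// setup.sql and instead reads a per-version file from the queries config directory,
// rewriting pgMonitor's default pgBackRest script path to the one shipped in the
// database image. A condensed sketch of that file handling, with the paths copied
// from the hunk (the surrounding reconcile logic is omitted):
package sketch

import (
	"fmt"
	"os"
	"strings"
)

// loadSetupSQL reads pgMonitor's setup.sql for one Postgres major version, e.g.
// "<configDir>/pg15/setup.sql", and points its pgBackRest helper at the script
// packaged in the Crunchy database image.
func loadSetupSQL(configDir string, pgVersion int) (string, error) {
	sql, err := os.ReadFile(fmt.Sprintf("%s/pg%d/setup.sql", configDir, pgVersion))
	if err != nil {
		return "", err
	}
	return strings.ReplaceAll(string(sql),
		"/usr/bin/pgbackrest-info.sh",
		"/opt/crunchy/bin/postgres/pgbackrest_info.sh"), nil
}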
- - for _, instance := range instances.forCluster { - if terminating, known := instance.IsTerminating(); terminating || !known { - continue - } - if writable, known := instance.IsWritable(); !writable || !known { - continue - } - - running, known := instance.IsRunning(naming.ContainerDatabase) - if running && known && len(instance.Pods) > 0 { - writableInstance = instance - } - } - - if writableInstance == nil { + writablePod, writableInstance = instances.writablePod(naming.ContainerDatabase) + if writableInstance == nil || writablePod == nil { return nil } - writablePod = writableInstance.Pods[0] - + // For the writableInstance found above + // 1) get and save the imageIDs for `database` container, and + // 2) exit early if we can't get the ImageID of this container. + // We use this ImageID and the setup.sql file in the hash we make to see if the operator needs to rerun + // the `EnableExporterInPostgreSQL` funcs; that way we are always running + // that function against an updated and running pod. if pgmonitor.ExporterEnabled(cluster) { - running, known := writableInstance.IsRunning(naming.ContainerPGMonitorExporter) - if !running || !known { - // Exporter container needs to be available to get setup.sql; - return nil + sql, err := os.ReadFile(fmt.Sprintf("%s/pg%d/setup.sql", pgmonitor.GetQueriesConfigDir(ctx), cluster.Spec.PostgresVersion)) + if err != nil { + return err } + // TODO: Revisit how pgbackrest_info.sh is used with pgMonitor. + // pgMonitor queries expect a path to a script that runs pgBackRest + // info and provides json output. In the queries yaml for pgBackRest + // the default path is `/usr/bin/pgbackrest-info.sh`. We update + // the path to point to the script in our database image. + setup = strings.ReplaceAll(string(sql), "/usr/bin/pgbackrest-info.sh", + "/opt/crunchy/bin/postgres/pgbackrest_info.sh") + for _, containerStatus := range writablePod.Status.ContainerStatuses { - if containerStatus.Name == naming.ContainerPGMonitorExporter { - setup = containerStatus.ImageID + if containerStatus.Name == naming.ContainerDatabase { + pgImageSHA = containerStatus.ImageID } } - if setup == "" { - // Could not get exporter container imageID + + // Could not get container imageID + if pgImageSHA == "" { return nil } } @@ -130,7 +99,7 @@ func (r *Reconciler) reconcilePGMonitorExporter(ctx context.Context, // pgMonitor objects. action := func(ctx context.Context, exec postgres.Executor) error { - return pgmonitor.EnableExporterInPostgreSQL(ctx, exec, monitoringSecret, exporterDB, setup) + return pgmonitor.EnableExporterInPostgreSQL(ctx, exec, monitoringSecret, pgmonitor.ExporterDB, setup) } if !pgmonitor.ExporterEnabled(cluster) { @@ -147,11 +116,13 @@ func (r *Reconciler) reconcilePGMonitorExporter(ctx context.Context, ) error { _, err := io.Copy(hasher, stdin) if err == nil { - _, err = fmt.Fprint(hasher, command) + // Use command and image tag in hash to execute hash on image update + _, err = fmt.Fprint(hasher, command, pgImageSHA, setup) } return err }) }) + if err != nil { return err } @@ -161,19 +132,11 @@ func (r *Reconciler) reconcilePGMonitorExporter(ctx context.Context, // Include the revision hash in any log messages. 
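// The hash computed above now folds the executed command, the database container's
// image ID, and the setup SQL into the stored ExporterConfiguration revision, so an
// image update or a change to setup.sql forces the SQL to be re-applied. The
// operator's actual hashing helper is not part of this hunk; the following is only a
// stand-in with the same inputs to illustrate the idea:
package sketch

import (
	"fmt"
	"hash/fnv"
	"io"
	"strings"
)

// exporterRevision derives a short revision string from everything that should
// trigger another PodExec run: the SQL payload streamed to the hasher, the
// command, the database image ID, and the (rewritten) setup SQL.
func exporterRevision(sqlPayload, command, imageID, setup string) (string, error) {
	hasher := fnv.New32a()
	if _, err := io.Copy(hasher, strings.NewReader(sqlPayload)); err != nil {
		return "", err
	}
	if _, err := fmt.Fprint(hasher, command, imageID, setup); err != nil {
		return "", err
	}
	return fmt.Sprintf("%08x", hasher.Sum32()), nil
}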
ctx := logging.NewContext(ctx, logging.FromContext(ctx).WithValues("revision", revision)) - if pgmonitor.ExporterEnabled(cluster) { - exec := func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(writablePod.Namespace, writablePod.Name, naming.ContainerPGMonitorExporter, stdin, stdout, stderr, command...) - } - setup, _, err = pgmonitor.Executor(exec).GetExporterSetupSQL(ctx, cluster.Spec.PostgresVersion) - } - // Apply the necessary SQL and record its hash in cluster.Status - if err == nil { - err = action(ctx, func(_ context.Context, stdin io.Reader, + err = action(ctx, func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(writablePod.Namespace, writablePod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + return r.PodExec(ctx, writablePod.Namespace, writablePod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) }) } if err == nil { @@ -224,25 +187,33 @@ func (r *Reconciler) reconcileMonitoringSecret( intent.Data = make(map[string][]byte) - if len(existing.Data["password"]) == 0 || len(existing.Data["verifier"]) == 0 { - password, err := util.GeneratePassword(util.DefaultGeneratedPasswordLength) + // Copy existing password and verifier into the intent + if existing.Data != nil { + intent.Data["password"] = existing.Data["password"] + intent.Data["verifier"] = existing.Data["verifier"] + } + + // When password is unset, generate a new one + if len(intent.Data["password"]) == 0 { + password, err := util.GenerateASCIIPassword(util.DefaultGeneratedPasswordLength) if err != nil { return nil, err } + intent.Data["password"] = []byte(password) + // We generated a new password, unset the verifier so that it is regenerated + intent.Data["verifier"] = nil + } - // Generate the SCRAM verifier now and store alongside the plaintext - // password so that later reconciles don't generate it repeatedly. - // NOTE(cbandy): We don't have a function to compare a plaintext password - // to a SCRAM verifier. - verifier, err := pgpassword.NewSCRAMPassword(password).Build() + // When a password has been generated or the verifier is empty, + // generate a verifier based on the current password. + // NOTE(cbandy): We don't have a function to compare a plaintext + // password to a SCRAM verifier. 
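// The password and verifier handling in this hunk follows a copy-then-fill pattern:
// keep whatever the existing Secret already holds, generate a password only when one
// is missing (clearing the verifier so it is rebuilt), and rebuild the SCRAM verifier
// whenever it is absent. A self-contained sketch of that decision flow, where
// generate and buildVerifier stand in for util.GenerateASCIIPassword and
// pgpassword.NewSCRAMPassword(...).Build() from this hunk:
package sketch

// ensurePasswordAndVerifier fills in the "password" and "verifier" keys of a
// Secret's data map according to the rules described above.
func ensurePasswordAndVerifier(
	data map[string][]byte,
	generate func() (string, error),
	buildVerifier func(password string) (string, error),
) error {
	if len(data["password"]) == 0 {
		password, err := generate()
		if err != nil {
			return err
		}
		data["password"] = []byte(password)
		// A freshly generated password invalidates any stored verifier.
		data["verifier"] = nil
	}
	if len(data["verifier"]) == 0 {
		verifier, err := buildVerifier(string(data["password"]))
		if err != nil {
			return err
		}
		data["verifier"] = []byte(verifier)
	}
	return nil
}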
+ if len(intent.Data["verifier"]) == 0 { + verifier, err := pgpassword.NewSCRAMPassword(string(intent.Data["password"])).Build() if err != nil { - return nil, err + return nil, errors.WithStack(err) } - intent.Data["password"] = []byte(password) intent.Data["verifier"] = []byte(verifier) - } else { - intent.Data["password"] = existing.Data["password"] - intent.Data["verifier"] = existing.Data["verifier"] } err = errors.WithStack(r.setControllerReference(cluster, intent)) @@ -259,74 +230,94 @@ func (r *Reconciler) reconcileMonitoringSecret( // addPGMonitorToInstancePodSpec performs the necessary setup to add // pgMonitor resources on a PodTemplateSpec func addPGMonitorToInstancePodSpec( + ctx context.Context, cluster *v1beta1.PostgresCluster, - template *corev1.PodTemplateSpec) error { + template *corev1.PodTemplateSpec, + exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap) error { - err := addPGMonitorExporterToInstancePodSpec(cluster, template) + err := addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, exporterWebConfig) return err } // addPGMonitorExporterToInstancePodSpec performs the necessary setup to // add pgMonitor exporter resources to a PodTemplateSpec -// TODO jmckulk: refactor to pass around monitoring secret; Without the secret +// TODO (jmckulk): refactor to pass around monitoring secret; Without the secret // the exporter container cannot be created; Testing relies on ensuring the // monitoring secret is available func addPGMonitorExporterToInstancePodSpec( + ctx context.Context, cluster *v1beta1.PostgresCluster, - template *corev1.PodTemplateSpec) error { + template *corev1.PodTemplateSpec, + exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap) error { if !pgmonitor.ExporterEnabled(cluster) { return nil } + certSecret := cluster.Spec.Monitoring.PGMonitor.Exporter.CustomTLSSecret + withBuiltInCollectors := + !strings.EqualFold(cluster.Annotations[naming.PostgresExporterCollectorsAnnotation], "None") + + var cmd []string + // PG 17 does not include some of the columns found in stat_bgwriter with older PGs. + // Selectively turn off the collector for stat_bgwriter in PG 17, unless the user + // requests all collectors to be turned off. + switch { + case cluster.Spec.PostgresVersion == 17 && withBuiltInCollectors && certSecret == nil: + cmd = pgmonitor.ExporterStartCommand(withBuiltInCollectors, + pgmonitor.ExporterDeactivateStatBGWriterFlag) + case cluster.Spec.PostgresVersion == 17 && withBuiltInCollectors && certSecret != nil: + cmd = pgmonitor.ExporterStartCommand(withBuiltInCollectors, + pgmonitor.ExporterWebConfigFileFlag, + pgmonitor.ExporterDeactivateStatBGWriterFlag) + // If you're turning off all built-in collectors, we don't care which + // version of PG you're using. 
+ case certSecret != nil: + cmd = pgmonitor.ExporterStartCommand(withBuiltInCollectors, + pgmonitor.ExporterWebConfigFileFlag) + default: + cmd = pgmonitor.ExporterStartCommand(withBuiltInCollectors) + } + securityContext := initialize.RestrictedSecurityContext() exporterContainer := corev1.Container{ - Name: naming.ContainerPGMonitorExporter, - Image: config.PGExporterContainerImage(cluster), - Resources: cluster.Spec.Monitoring.PGMonitor.Exporter.Resources, - Command: []string{ - "/opt/cpm/bin/start.sh", - }, + Name: naming.ContainerPGMonitorExporter, + Image: config.PGExporterContainerImage(cluster), + ImagePullPolicy: cluster.Spec.ImagePullPolicy, + Resources: cluster.Spec.Monitoring.PGMonitor.Exporter.Resources, + Command: cmd, Env: []corev1.EnvVar{ - {Name: "CONFIG_DIR", Value: "/opt/cpm/conf"}, - {Name: "POSTGRES_EXPORTER_PORT", Value: fmt.Sprint(exporterPort)}, - {Name: "PGBACKREST_INFO_THROTTLE_MINUTES", Value: "10"}, - {Name: "PG_STAT_STATEMENTS_LIMIT", Value: "20"}, - {Name: "PG_STAT_STATEMENTS_THROTTLE_MINUTES", Value: "-1"}, - {Name: "EXPORTER_PG_HOST", Value: exporterHost}, - {Name: "EXPORTER_PG_PORT", Value: fmt.Sprint(*cluster.Spec.Port)}, - {Name: "EXPORTER_PG_DATABASE", Value: exporterDB}, - {Name: "EXPORTER_PG_USER", Value: pgmonitor.MonitoringUser}, - {Name: "EXPORTER_PG_PASSWORD", ValueFrom: &corev1.EnvVarSource{ - // Environment variables are not updated after a secret update. - // This could lead to a state where the exporter does not have - // the correct password and the container needs to restart. - // https://kubernetes.io/docs/concepts/configuration/secret/#environment-variables-are-not-updated-after-a-secret-update - // https://github.com/kubernetes/kubernetes/issues/29761 - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: naming.MonitoringUserSecret(cluster).Name, - }, - Key: "password", - }, - }}, + {Name: "DATA_SOURCE_URI", Value: fmt.Sprintf("%s:%d/%s", pgmonitor.ExporterHost, *cluster.Spec.Port, pgmonitor.ExporterDB)}, + {Name: "DATA_SOURCE_USER", Value: pgmonitor.MonitoringUser}, + {Name: "DATA_SOURCE_PASS_FILE", Value: "/opt/crunchy/password"}, }, SecurityContext: securityContext, // ContainerPort is needed to support proper target discovery by Prometheus for pgMonitor // integration Ports: []corev1.ContainerPort{{ - ContainerPort: exporterPort, + ContainerPort: pgmonitor.ExporterPort, Name: naming.PortExporter, Protocol: corev1.ProtocolTCP, }}, VolumeMounts: []corev1.VolumeMount{{ Name: "exporter-config", - // this is the path for custom config as defined in the start.sh script for the exporter container + // this is the path for both custom and default queries files MountPath: "/conf", + }, { + Name: "monitoring-secret", + MountPath: "/opt/crunchy/", }}, } - template.Spec.Containers = append(template.Spec.Containers, exporterContainer) + + passwordVolume := corev1.Volume{ + Name: "monitoring-secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: naming.MonitoringUserSecret(cluster).Name, + }, + }, + } // add custom exporter config volume configVolume := corev1.Volume{ @@ -337,86 +328,59 @@ func addPGMonitorExporterToInstancePodSpec( }, }, } - template.Spec.Volumes = append(template.Spec.Volumes, configVolume) - - podInfoVolume := corev1.Volume{ - Name: "podinfo", - VolumeSource: corev1.VolumeSource{ - DownwardAPI: &corev1.DownwardAPIVolumeSource{ - // The paths defined in Items (cpu_limit, cpu_request, etc.) 
- // are hard coded in the pgnodemx queries defined by - // pgMonitor configuration (queries_nodemx.yml) - // https://github.com/CrunchyData/pgmonitor/blob/master/exporter/postgres/queries_nodemx.yml - Items: []corev1.DownwardAPIVolumeFile{{ - Path: "cpu_limit", - ResourceFieldRef: &corev1.ResourceFieldSelector{ - ContainerName: naming.ContainerDatabase, - Resource: "limits.cpu", - Divisor: oneMillicore, - }, - }, { - Path: "cpu_request", - ResourceFieldRef: &corev1.ResourceFieldSelector{ - ContainerName: naming.ContainerDatabase, - Resource: "requests.cpu", - Divisor: oneMillicore, - }, - }, { - Path: "mem_limit", - ResourceFieldRef: &corev1.ResourceFieldSelector{ - ContainerName: naming.ContainerDatabase, - Resource: "limits.memory", - Divisor: oneMebibyte, - }, - }, { - Path: "mem_request", - ResourceFieldRef: &corev1.ResourceFieldSelector{ - ContainerName: naming.ContainerDatabase, - Resource: "requests.memory", - Divisor: oneMebibyte, - }, - }, { - Path: "labels", - FieldRef: &corev1.ObjectFieldSelector{ - APIVersion: corev1.SchemeGroupVersion.Version, - FieldPath: "metadata.labels", - }, - }, { - Path: "annotations", - FieldRef: &corev1.ObjectFieldSelector{ - APIVersion: corev1.SchemeGroupVersion.Version, - FieldPath: "metadata.annotations", - }, - }}, + template.Spec.Volumes = append(template.Spec.Volumes, configVolume, passwordVolume) + + // The original "custom queries" ability allowed users to provide a file with custom queries; + // however, it would turn off the default queries. The new "custom queries" ability allows + // users to append custom queries to the default queries. This new behavior is feature gated. + // Therefore, we only want to add the default queries ConfigMap as a source for the + // "exporter-config" volume if the AppendCustomQueries feature gate is turned on OR if the + // user has not provided any custom configuration. 
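// The conditional just below controls whether the generated default-queries
// ConfigMap is projected into the "exporter-config" volume alongside any custom
// configuration the user supplied. A sketch of how those projected sources are
// assembled, with the feature-gate decision passed in as a plain boolean (the real
// check is feature.Enabled(ctx, feature.AppendCustomQueries), as shown in this hunk):
package sketch

import (
	corev1 "k8s.io/api/core/v1"
)

// exporterConfigSources keeps the user's custom projections first and appends
// the default-queries ConfigMap only when the AppendCustomQueries gate is on or
// no custom configuration was provided.
func exporterConfigSources(
	custom []corev1.VolumeProjection,
	defaultQueriesConfigMap string,
	appendCustomQueries bool,
) []corev1.VolumeProjection {
	sources := append([]corev1.VolumeProjection{}, custom...)
	if appendCustomQueries || len(custom) == 0 {
		sources = append(sources, corev1.VolumeProjection{
			ConfigMap: &corev1.ConfigMapProjection{
				LocalObjectReference: corev1.LocalObjectReference{
					Name: defaultQueriesConfigMap,
				},
			},
		})
	}
	return sources
}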
+ if feature.Enabled(ctx, feature.AppendCustomQueries) || + cluster.Spec.Monitoring.PGMonitor.Exporter.Configuration == nil { + + defaultConfigVolumeProjection := corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: exporterQueriesConfig.Name, + }, }, - }, - } - template.Spec.Volumes = append(template.Spec.Volumes, podInfoVolume) - - var index int - found := false - for i, c := range template.Spec.Containers { - if c.Name == naming.ContainerDatabase { - index = i - found = true - break } + configVolume.VolumeSource.Projected.Sources = append(configVolume.VolumeSource.Projected.Sources, + defaultConfigVolumeProjection) } - if !found { - return errors.New("could not find database container to mount downstream api") - } - volumeMount := corev1.VolumeMount{ - Name: "podinfo", - // This is the default value for `pgnodemx.kdapi_path` PostgreSQL - // parameter - // https://github.com/CrunchyData/pgnodemx#kubernetes-downwardapi-related-functions - // https://github.com/CrunchyData/pgnodemx#configuration - MountPath: "/etc/podinfo", + if certSecret != nil { + // TODO (jmckulk): params for paths and such + certVolume := corev1.Volume{Name: "exporter-certs"} + certVolume.Projected = &corev1.ProjectedVolumeSource{ + Sources: append([]corev1.VolumeProjection{}, + corev1.VolumeProjection{ + Secret: certSecret, + }, + ), + } + + webConfigVolume := corev1.Volume{Name: "web-config"} + webConfigVolume.ConfigMap = &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: exporterWebConfig.Name, + }, + } + template.Spec.Volumes = append(template.Spec.Volumes, certVolume, webConfigVolume) + + mounts := []corev1.VolumeMount{{ + Name: "exporter-certs", + MountPath: "/certs", + }, { + Name: "web-config", + MountPath: "/web-config", + }} + + exporterContainer.VolumeMounts = append(exporterContainer.VolumeMounts, mounts...) } - template.Spec.Containers[index].VolumeMounts = append( - template.Spec.Containers[index].VolumeMounts, - volumeMount) + + template.Spec.Containers = append(template.Spec.Containers, exporterContainer) // add the proper label to support Pod discovery by Prometheus per pgMonitor configuration initialize.Labels(template) @@ -424,3 +388,107 @@ func addPGMonitorExporterToInstancePodSpec( return nil } + +// reconcileExporterWebConfig reconciles the configmap containing the webconfig for exporter tls +func (r *Reconciler) reconcileExporterWebConfig(ctx context.Context, + cluster *v1beta1.PostgresCluster) (*corev1.ConfigMap, error) { + + existing := &corev1.ConfigMap{ObjectMeta: naming.ExporterWebConfigMap(cluster)} + err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + if client.IgnoreNotFound(err) != nil { + return nil, err + } + + if !pgmonitor.ExporterEnabled(cluster) || cluster.Spec.Monitoring.PGMonitor.Exporter.CustomTLSSecret == nil { + // We could still have a NotFound error here so check the err. + // If no error that means the configmap is found and needs to be deleted + if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, existing)) + } + return nil, client.IgnoreNotFound(err) + } + + intent := &corev1.ConfigMap{ + ObjectMeta: naming.ExporterWebConfigMap(cluster), + Data: map[string]string{ + "web-config.yml": ` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + + +# A certificate and a key file are needed to enable TLS. 
+tls_server_config: + cert_file: /certs/tls.crt + key_file: /certs/tls.key`, + }, + } + + intent.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + ) + intent.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleMonitoring, + }) + + intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + + err = errors.WithStack(r.setControllerReference(cluster, intent)) + if err == nil { + err = errors.WithStack(r.apply(ctx, intent)) + } + if err == nil { + return intent, nil + } + + return nil, err +} + +// reconcileExporterQueriesConfig reconciles the configmap containing the default queries for exporter +func (r *Reconciler) reconcileExporterQueriesConfig(ctx context.Context, + cluster *v1beta1.PostgresCluster) (*corev1.ConfigMap, error) { + + existing := &corev1.ConfigMap{ObjectMeta: naming.ExporterQueriesConfigMap(cluster)} + err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + if client.IgnoreNotFound(err) != nil { + return nil, err + } + + if !pgmonitor.ExporterEnabled(cluster) { + // We could still have a NotFound error here so check the err. + // If no error that means the configmap is found and needs to be deleted + if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, existing)) + } + return nil, client.IgnoreNotFound(err) + } + + intent := &corev1.ConfigMap{ + ObjectMeta: naming.ExporterQueriesConfigMap(cluster), + Data: map[string]string{"defaultQueries.yml": pgmonitor.GenerateDefaultExporterQueries(ctx, cluster)}, + } + + intent.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + ) + intent.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleMonitoring, + }) + + intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + + err = errors.WithStack(r.setControllerReference(cluster, intent)) + if err == nil { + err = errors.WithStack(r.apply(ctx, intent)) + } + if err == nil { + return intent, nil + } + + return nil, err +} diff --git a/internal/controller/postgrescluster/pgmonitor_test.go b/internal/controller/postgrescluster/pgmonitor_test.go index 056e1098f0..8d8c8281d0 100644 --- a/internal/controller/postgrescluster/pgmonitor_test.go +++ b/internal/controller/postgrescluster/pgmonitor_test.go @@ -1,69 +1,107 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "bytes" "context" - "fmt" "io" "os" "strings" "testing" - "github.com/crunchydata/postgres-operator/internal/initialize" - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/pgmonitor" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" - "go.opentelemetry.io/otel" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +func testExporterCollectorsAnnotation(t *testing.T, ctx context.Context, cluster *v1beta1.PostgresCluster, queriesConfig, webConfig *corev1.ConfigMap) { + t.Helper() + + t.Run("ExporterCollectorsAnnotation", func(t *testing.T) { + t.Run("UnexpectedValue", func(t *testing.T) { + template := new(corev1.PodTemplateSpec) + cluster := cluster.DeepCopy() + cluster.SetAnnotations(map[string]string{ + naming.PostgresExporterCollectorsAnnotation: "wrong-value", + }) + + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, queriesConfig, webConfig)) + + assert.Equal(t, len(template.Spec.Containers), 1) + container := template.Spec.Containers[0] + + command := strings.Join(container.Command, "\n") + assert.Assert(t, cmp.Contains(command, "postgres_exporter")) + assert.Assert(t, !strings.Contains(command, "collector")) + }) + + t.Run("ExpectedValueNone", func(t *testing.T) { + template := new(corev1.PodTemplateSpec) + cluster := cluster.DeepCopy() + cluster.SetAnnotations(map[string]string{ + naming.PostgresExporterCollectorsAnnotation: "None", + }) + + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, queriesConfig, webConfig)) + + assert.Equal(t, len(template.Spec.Containers), 1) + container := template.Spec.Containers[0] + + command := strings.Join(container.Command, "\n") + assert.Assert(t, cmp.Contains(command, "postgres_exporter")) + assert.Assert(t, cmp.Contains(command, "--[no-]collector")) + + t.Run("LowercaseToo", func(t *testing.T) { + template := new(corev1.PodTemplateSpec) + cluster.SetAnnotations(map[string]string{ + naming.PostgresExporterCollectorsAnnotation: "none", + }) + + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, queriesConfig, webConfig)) + assert.Assert(t, cmp.Contains(strings.Join(template.Spec.Containers[0].Command, "\n"), "--[no-]collector")) + }) + }) + }) +} + func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { - cluster := &v1beta1.PostgresCluster{} - cluster.Spec.Port = initialize.Int32(5432) + t.Parallel() + ctx := context.Background() image := "test/image:tag" - resources := corev1.ResourceRequirements{} - getContainerWithName := func(containers []corev1.Container, name string) corev1.Container { - for _, container := range containers { - if container.Name == name { - return container - } 
- } - return corev1.Container{} + cluster := &v1beta1.PostgresCluster{} + cluster.Name = "pg1" + cluster.Spec.Port = initialize.Int32(5432) + cluster.Spec.ImagePullPolicy = corev1.PullAlways + + resources := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + }, } + exporterQueriesConfig := new(corev1.ConfigMap) + exporterQueriesConfig.Name = "query-conf" + t.Run("ExporterDisabled", func(t *testing.T) { template := &corev1.PodTemplateSpec{} - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template)) - assert.DeepEqual(t, getContainerWithName(template.Spec.Containers, - naming.ContainerPGMonitorExporter), corev1.Container{}) - assert.Equal(t, len(template.Spec.Volumes), 0) - + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, nil, nil)) + assert.DeepEqual(t, template, &corev1.PodTemplateSpec{}) }) t.Run("ExporterEnabled", func(t *testing.T) { @@ -82,60 +120,119 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { }}, }, } - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template)) - container := getContainerWithName(template.Spec.Containers, naming.ContainerPGMonitorExporter) - assert.Equal(t, container.Image, image) - assert.DeepEqual(t, container.Resources, resources) - assert.DeepEqual(t, container.Command, []string{"/opt/cpm/bin/start.sh"}) - assert.Equal(t, *container.SecurityContext.Privileged, false) - assert.Equal(t, *container.SecurityContext.ReadOnlyRootFilesystem, true) - assert.Equal(t, *container.SecurityContext.AllowPrivilegeEscalation, false) - - expectedENV := []corev1.EnvVar{ - {Name: "CONFIG_DIR", Value: "/opt/cpm/conf"}, - {Name: "POSTGRES_EXPORTER_PORT", Value: "9187"}, - {Name: "PGBACKREST_INFO_THROTTLE_MINUTES", Value: "10"}, - {Name: "PG_STAT_STATEMENTS_LIMIT", Value: "20"}, - {Name: "PG_STAT_STATEMENTS_THROTTLE_MINUTES", Value: "-1"}, - {Name: "EXPORTER_PG_HOST", Value: "localhost"}, - {Name: "EXPORTER_PG_PORT", Value: fmt.Sprint(*cluster.Spec.Port)}, - {Name: "EXPORTER_PG_DATABASE", Value: "postgres"}, - {Name: "EXPORTER_PG_USER", Value: pgmonitor.MonitoringUser}, - {Name: "EXPORTER_PG_PASSWORD", ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: naming.MonitoringUserSecret(cluster).Name, + + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, nil)) + + assert.Equal(t, len(template.Spec.Containers), 2) + container := template.Spec.Containers[1] + + command := strings.Join(container.Command, "\n") + assert.Assert(t, cmp.Contains(command, "postgres_exporter")) + assert.Assert(t, cmp.Contains(command, "--extend.query-path")) + assert.Assert(t, cmp.Contains(command, "--web.listen-address")) + + // Exclude command from the following comparison. 
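// The expected container asserted below reflects the move away from the removed
// EXPORTER_PG_* environment variables: postgres_exporter now receives its connection
// settings through DATA_SOURCE_URI, DATA_SOURCE_USER, and DATA_SOURCE_PASS_FILE,
// with the password read from the monitoring Secret mounted at /opt/crunchy/ rather
// than injected as an environment variable. A sketch of just that environment block,
// with the values taken from the asserted YAML:
package sketch

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// exporterDataSourceEnv builds the three DATA_SOURCE_* variables, e.g.
// localhost:5432/postgres for the URI and /opt/crunchy/password for the
// password file.
func exporterDataSourceEnv(host string, port int32, database, user, passwordFile string) []corev1.EnvVar {
	return []corev1.EnvVar{
		{Name: "DATA_SOURCE_URI", Value: fmt.Sprintf("%s:%d/%s", host, port, database)},
		{Name: "DATA_SOURCE_USER", Value: user},
		{Name: "DATA_SOURCE_PASS_FILE", Value: passwordFile},
	}
}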
+ container.Command = nil + assert.Assert(t, cmp.MarshalMatches(container, ` +env: +- name: DATA_SOURCE_URI + value: localhost:5432/postgres +- name: DATA_SOURCE_USER + value: ccp_monitoring +- name: DATA_SOURCE_PASS_FILE + value: /opt/crunchy/password +image: test/image:tag +imagePullPolicy: Always +name: exporter +ports: +- containerPort: 9187 + name: exporter + protocol: TCP +resources: + requests: + cpu: 100m +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault +volumeMounts: +- mountPath: /conf + name: exporter-config +- mountPath: /opt/crunchy/ + name: monitoring-secret + `)) + + assert.Assert(t, cmp.MarshalMatches(template.Spec.Volumes, ` +- name: exporter-config + projected: + sources: + - configMap: + name: query-conf +- name: monitoring-secret + secret: + secretName: pg1-monitoring + `)) + + testExporterCollectorsAnnotation(t, ctx, cluster, exporterQueriesConfig, nil) + }) + + t.Run("CustomConfigAppendCustomQueriesOff", func(t *testing.T) { + cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ + PGMonitor: &v1beta1.PGMonitorSpec{ + Exporter: &v1beta1.ExporterSpec{ + Image: image, + Resources: resources, + Configuration: []corev1.VolumeProjection{{ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "exporter-custom-config-test", + }, + }}, }, - Key: "password", }, - }}} - assert.DeepEqual(t, container.Env, expectedENV) - - assert.Assert(t, container.Ports[0].ContainerPort == int32(9187)) - assert.Assert(t, container.Ports[0].Name == "exporter") - assert.Assert(t, container.Ports[0].Protocol == "TCP") - - assert.Assert(t, template.Spec.Volumes != nil) - found := false - for _, v := range template.Spec.Volumes { - if v.Name == "podinfo" { - found = true - } + }, } - assert.Assert(t, found) - - dbContainer := getContainerWithName(template.Spec.Containers, naming.ContainerDatabase) - assert.Assert(t, dbContainer.VolumeMounts != nil) - found = false - for _, vm := range dbContainer.VolumeMounts { - if vm.Name == "podinfo" { - found = true - } + template := &corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: naming.ContainerDatabase, + }}, + }, } - assert.Assert(t, found) + + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, nil)) + + assert.Equal(t, len(template.Spec.Containers), 2) + container := template.Spec.Containers[1] + + assert.Assert(t, len(template.Spec.Volumes) > 0) + assert.Assert(t, cmp.MarshalMatches(template.Spec.Volumes[0], ` +name: exporter-config +projected: + sources: + - configMap: + name: exporter-custom-config-test + `)) + + assert.Assert(t, len(container.VolumeMounts) > 0) + assert.Assert(t, cmp.MarshalMatches(container.VolumeMounts[0], ` +mountPath: /conf +name: exporter-config + `)) }) - t.Run("CustomConfig", func(t *testing.T) { + t.Run("CustomConfigAppendCustomQueriesOn", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.AppendCustomQueries: true, + })) + ctx := feature.NewContext(ctx, gate) + cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ PGMonitor: &v1beta1.PGMonitorSpec{ Exporter: &v1beta1.ExporterSpec{ @@ -158,38 +255,93 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { }, } - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template)) + assert.NilError(t, 
addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, nil)) + + assert.Equal(t, len(template.Spec.Containers), 2) + container := template.Spec.Containers[1] + + assert.Assert(t, len(template.Spec.Volumes) > 0) + assert.Assert(t, cmp.MarshalMatches(template.Spec.Volumes[0], ` +name: exporter-config +projected: + sources: + - configMap: + name: exporter-custom-config-test + - configMap: + name: query-conf + `)) + + assert.Assert(t, len(container.VolumeMounts) > 0) + assert.Assert(t, cmp.MarshalMatches(container.VolumeMounts[0], ` +mountPath: /conf +name: exporter-config + `)) + }) - var foundConfigVolume bool - for _, v := range template.Spec.Volumes { - if v.Name == "exporter-config" { - assert.DeepEqual(t, v, corev1.Volume{ - Name: "exporter-config", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - Sources: cluster.Spec.Monitoring.PGMonitor.Exporter.Configuration, + t.Run("CustomTLS", func(t *testing.T) { + cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ + PGMonitor: &v1beta1.PGMonitorSpec{ + Exporter: &v1beta1.ExporterSpec{ + CustomTLSSecret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "custom-exporter-certs", }, }, - }) - foundConfigVolume = true - break - } + }, + }, } - assert.Assert(t, foundConfigVolume) - - container := getContainerWithName(template.Spec.Containers, naming.ContainerPGMonitorExporter) - var foundConfigMount bool - for _, vm := range container.VolumeMounts { - if vm.Name == "exporter-config" && vm.MountPath == "/conf" { - foundConfigMount = true - break - } + template := &corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: naming.ContainerDatabase, + }}, + }, } - assert.Assert(t, foundConfigMount) + + testConfigMap := new(corev1.ConfigMap) + testConfigMap.Name = "test-web-conf" + + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, testConfigMap)) + + assert.Equal(t, len(template.Spec.Containers), 2) + container := template.Spec.Containers[1] + + assert.Assert(t, len(template.Spec.Volumes) > 2, "Expected the original two volumes") + assert.Assert(t, cmp.MarshalMatches(template.Spec.Volumes[2:], ` +- name: exporter-certs + projected: + sources: + - secret: + name: custom-exporter-certs +- configMap: + name: test-web-conf + name: web-config + `)) + + assert.Assert(t, len(container.VolumeMounts) > 2, "Expected the original two mounts") + assert.Assert(t, cmp.MarshalMatches(container.VolumeMounts[2:], ` +- mountPath: /certs + name: exporter-certs +- mountPath: /web-config + name: web-config + `)) + + command := strings.Join(container.Command, "\n") + assert.Assert(t, cmp.Contains(command, "postgres_exporter")) + assert.Assert(t, cmp.Contains(command, "--web.config.file")) + + testExporterCollectorsAnnotation(t, ctx, cluster, exporterQueriesConfig, testConfigMap) }) } +// TestReconcilePGMonitorExporterSetupErrors tests how reconcilePGMonitorExporter +// reacts when the kubernetes resources are in different states (e.g., checks +// what happens when the database pod is terminating) func TestReconcilePGMonitorExporterSetupErrors(t *testing.T) { + if os.Getenv("QUERIES_CONFIG_DIR") == "" { + t.Skip("QUERIES_CONFIG_DIR must be set") + } + for _, test := range []struct { name string podExecCalled bool @@ -321,8 +473,9 @@ func TestReconcilePGMonitorExporterSetupErrors(t *testing.T) { }, Status: corev1.PodStatus{ ContainerStatuses: []corev1.ContainerStatus{{ - Name: 
naming.ContainerDatabase, - State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + Name: naming.ContainerDatabase, + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + ImageID: "image@sha123", }, { Name: naming.ContainerPGMonitorExporter, State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, @@ -343,14 +496,15 @@ func TestReconcilePGMonitorExporterSetupErrors(t *testing.T) { ctx := context.Background() var called bool reconciler := &Reconciler{ - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, } cluster := &v1beta1.PostgresCluster{} + cluster.Spec.PostgresVersion = 15 cluster.Spec.Monitoring = test.monitoring cluster.Status.Monitoring.ExporterConfiguration = test.status.ExporterConfiguration observed := &observedInstances{forCluster: test.instances} @@ -366,8 +520,8 @@ func TestReconcilePGMonitorExporter(t *testing.T) { ctx := context.Background() var called bool reconciler := &Reconciler{ - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, @@ -403,12 +557,19 @@ func TestReconcilePGMonitorExporter(t *testing.T) { called = false assert.NilError(t, reconciler.reconcilePGMonitorExporter(ctx, cluster, observed, nil)) - assert.Assert(t, called) - assert.Assert(t, cluster.Status.Monitoring.ExporterConfiguration != "") + assert.Assert(t, called, "PodExec was not called.") + assert.Assert(t, cluster.Status.Monitoring.ExporterConfiguration != "", "ExporterConfiguration was empty.") }) } +// TestReconcilePGMonitorExporterStatus checks that the exporter status is updated +// when it should be. Because the status updated when we update the setup sql from +// pgmonitor (by using podExec), we check if podExec is called when a change is needed. 
func TestReconcilePGMonitorExporterStatus(t *testing.T) { + if os.Getenv("QUERIES_CONFIG_DIR") == "" { + t.Skip("QUERIES_CONFIG_DIR must be set") + } + for _, test := range []struct { name string exporterEnabled bool @@ -440,8 +601,8 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { exporterEnabled: true, podExecCalled: false, // Status was generated manually for this test case - // TODO jmckulk: add code to generate status - status: v1beta1.MonitoringStatus{ExporterConfiguration: "6465cb4855"}, + // TODO (jmckulk): add code to generate status + status: v1beta1.MonitoringStatus{ExporterConfiguration: "6d874c58df"}, statusChangedAfterReconcile: false, }} { t.Run(test.name, func(t *testing.T) { @@ -451,17 +612,22 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { secret *corev1.Secret ) + // Create reconciler with mock PodExec function reconciler := &Reconciler{ - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, } + // Create the test cluster spec with the exporter status set cluster := &v1beta1.PostgresCluster{} + cluster.Spec.PostgresVersion = 15 cluster.Status.Monitoring.ExporterConfiguration = test.status.ExporterConfiguration + // Mock up an instances that will be defined in the cluster. The instances should + // have all necessary fields that will be needed to reconcile the exporter instances := []*Instance{ { Name: "daisy", @@ -472,8 +638,9 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { }, Status: corev1.PodStatus{ ContainerStatuses: []corev1.ContainerStatus{{ - Name: naming.ContainerDatabase, - State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + Name: naming.ContainerDatabase, + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + ImageID: "image@sha123", }}, }, }}, @@ -482,6 +649,7 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { } if test.exporterEnabled { + // When testing with exporter enabled update the spec with exporter fields cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ PGMonitor: &v1beta1.PGMonitorSpec{ Exporter: &v1beta1.ExporterSpec{ @@ -490,6 +658,7 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { }, } + // Update mock instances to include the exporter container instances[0].Pods[0].Status.ContainerStatuses = append( instances[0].Pods[0].Status.ContainerStatuses, corev1.ContainerStatus{ Name: naming.ContainerPGMonitorExporter, @@ -504,54 +673,53 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { } } + // Mock up observed instances based on our mock instances observed := &observedInstances{forCluster: instances} + // Check that we can reconcile with the test resources assert.NilError(t, reconciler.reconcilePGMonitorExporter(ctx, cluster, observed, secret)) - assert.Equal(t, called, test.podExecCalled) + // Check that the exporter status changes when it needs to assert.Assert(t, test.statusChangedAfterReconcile == (cluster.Status.Monitoring.ExporterConfiguration != test.status.ExporterConfiguration), "got %v", cluster.Status.Monitoring.ExporterConfiguration) + // Check that pod exec is called correctly + assert.Equal(t, called, test.podExecCalled) }) } } -func TestReconcilePGMonitorSecret(t *testing.T) { - // TODO jmckulk: debug test with existing cluster +// TestReconcileMonitoringSecret checks that the secret intent 
returned by reconcileMonitoringSecret +// is correct. If exporter is enabled, the return shouldn't be nil. If the exporter is disabled, the +// return should be nil. +func TestReconcileMonitoringSecret(t *testing.T) { + // TODO (jmckulk): debug test with existing cluster // Seems to be an issue when running with other tests if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { t.Skip("Test failing with existing cluster") } - env, cc, config := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) + ctx := context.Background() - reconciler := &Reconciler{} - ctx, cancel := setupManager(t, config, func(mgr manager.Manager) { - reconciler = &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: mgr.GetEventRecorderFor(ControllerName), - Tracer: otel.Tracer(t.Name()), - } - }) - t.Cleanup(func() { teardownManager(cancel, t) }) + // Kubernetes is required because reconcileMonitoringSecret + // (1) uses the client to get existing secrets + // (2) sets the controller reference on the new secret + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) - ns := &corev1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - ns.Labels = labels.Set{"postgres-operator-test": ""} - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, ns)) }) + reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} cluster := testCluster() cluster.Default() cluster.UID = types.UID("hippouid") - cluster.Namespace = ns.Name + cluster.Namespace = setupNamespace(t, cc).Name + // If the exporter is disabled then the secret should not exist + // Existing secrets should be removed t.Run("ExporterDisabled", func(t *testing.T) { t.Run("NotExisting", func(t *testing.T) { secret, err := reconciler.reconcileMonitoringSecret(ctx, cluster) assert.NilError(t, err) - assert.Assert(t, secret == nil) + assert.Assert(t, secret == nil, "Monitoring secret was not nil.") }) t.Run("Existing", func(t *testing.T) { @@ -565,16 +733,19 @@ func TestReconcilePGMonitorSecret(t *testing.T) { cluster.Spec.Monitoring = nil actual, err := reconciler.reconcileMonitoringSecret(ctx, cluster) assert.NilError(t, err) - assert.Assert(t, actual == nil) + assert.Assert(t, actual == nil, "Monitoring secret still exists after turning exporter off.") }) }) + // If the exporter is enabled then a monitoring secret should exist + // It will need to be created or left in place with existing password t.Run("ExporterEnabled", func(t *testing.T) { var ( existing, actual *corev1.Secret err error ) + // Enable monitoring in the test cluster spec cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ PGMonitor: &v1beta1.PGMonitorSpec{ Exporter: &v1beta1.ExporterSpec{ @@ -586,13 +757,83 @@ func TestReconcilePGMonitorSecret(t *testing.T) { t.Run("NotExisting", func(t *testing.T) { existing, err = reconciler.reconcileMonitoringSecret(ctx, cluster) assert.NilError(t, err) - assert.Assert(t, existing != nil) + assert.Assert(t, existing != nil, "Monitoring secret does not exist.") }) t.Run("Existing", func(t *testing.T) { actual, err = reconciler.reconcileMonitoringSecret(ctx, cluster) assert.NilError(t, err) - assert.Assert(t, bytes.Equal(actual.Data["password"], existing.Data["password"]), ns.Name) + assert.Assert(t, bytes.Equal(actual.Data["password"], existing.Data["password"]), "Passwords do not match.") + }) + }) +} + +// TestReconcileExporterQueriesConfig checks that the ConfigMap intent returned by +// reconcileExporterQueriesConfig is correct. 
If exporter is enabled, the return +// shouldn't be nil. If the exporter is disabled, the return should be nil. +func TestReconcileExporterQueriesConfig(t *testing.T) { + ctx := context.Background() + + // Kubernetes is required because reconcileExporterQueriesConfig + // (1) uses the client to get existing ConfigMaps + // (2) sets the controller reference on the new ConfigMap + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + + cluster := testCluster() + cluster.Default() + cluster.UID = types.UID("hippouid") + cluster.Namespace = setupNamespace(t, cc).Name + + t.Run("ExporterDisabled", func(t *testing.T) { + t.Run("NotExisting", func(t *testing.T) { + queriesConfig, err := reconciler.reconcileExporterQueriesConfig(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, queriesConfig == nil, "Default queries ConfigMap is present.") + }) + + t.Run("Existing", func(t *testing.T) { + cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ + PGMonitor: &v1beta1.PGMonitorSpec{ + Exporter: &v1beta1.ExporterSpec{Image: "image"}}} + existing, err := reconciler.reconcileExporterQueriesConfig(ctx, cluster) + assert.NilError(t, err, "error in test; existing config not created") + assert.Assert(t, existing != nil, "error in test; existing config not created") + + cluster.Spec.Monitoring = nil + actual, err := reconciler.reconcileExporterQueriesConfig(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, actual == nil, "Default queries config still present after disabling exporter.") + }) + }) + + t.Run("ExporterEnabled", func(t *testing.T) { + var ( + existing, actual *corev1.ConfigMap + err error + ) + + // Enable monitoring in the test cluster spec + cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ + PGMonitor: &v1beta1.PGMonitorSpec{ + Exporter: &v1beta1.ExporterSpec{ + Image: "image", + }, + }, + } + + t.Run("NotExisting", func(t *testing.T) { + existing, err = reconciler.reconcileExporterQueriesConfig(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, existing != nil, "Default queries config does not exist.") + }) + + t.Run("Existing", func(t *testing.T) { + actual, err = reconciler.reconcileExporterQueriesConfig(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, actual.Data["defaultQueries.yml"] == existing.Data["defaultQueries.yml"], "Data does not align.") }) }) } diff --git a/internal/controller/postgrescluster/pki.go b/internal/controller/postgrescluster/pki.go index 92a4f2c8ed..0314ad4406 100644 --- a/internal/controller/postgrescluster/pki.go +++ b/internal/controller/postgrescluster/pki.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -20,7 +9,7 @@ import ( "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/naming" @@ -35,8 +24,8 @@ const ( rootCertFile = "ca.crt" ) -// +kubebuilder:rbac:groups="",resources=secrets,verbs=get -// +kubebuilder:rbac:groups="",resources=secrets,verbs=create;patch +// +kubebuilder:rbac:groups="",resources="secrets",verbs={get} +// +kubebuilder:rbac:groups="",resources="secrets",verbs={create,patch} // reconcileRootCertificate ensures the root certificate, stored // in the relevant secret, has been created and is not 'bad' due @@ -50,29 +39,28 @@ func (r *Reconciler) reconcileRootCertificate( ) { const keyCertificate, keyPrivateKey = "root.crt", "root.key" - existing := &v1.Secret{} + existing := &corev1.Secret{} existing.Namespace, existing.Name = cluster.Namespace, naming.RootCertSecret err := errors.WithStack(client.IgnoreNotFound( r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) - root := pki.NewRootCertificateAuthority() + root := &pki.RootCertificateAuthority{} - if data, ok := existing.Data[keyCertificate]; err == nil && ok { - root.Certificate, err = pki.ParseCertificate(data) - err = errors.WithStack(err) - } - if data, ok := existing.Data[keyPrivateKey]; err == nil && ok { - root.PrivateKey, err = pki.ParsePrivateKey(data) - err = errors.WithStack(err) - } + if err == nil { + // Unmarshal and validate the stored root. These first errors can + // be ignored because they result in an invalid root which is then + // correctly regenerated. + _ = root.Certificate.UnmarshalText(existing.Data[keyCertificate]) + _ = root.PrivateKey.UnmarshalText(existing.Data[keyPrivateKey]) - // if there is an error or the root CA is bad, generate a new one - if err != nil || pki.RootCAIsBad(root) { - err = errors.WithStack(root.Generate()) + if !pki.RootIsValid(root) { + root, err = pki.NewRootCertificateAuthority() + err = errors.WithStack(err) + } } - intent := &v1.Secret{} - intent.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Secret")) + intent := &corev1.Secret{} + intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) intent.Namespace, intent.Name = cluster.Namespace, naming.RootCertSecret intent.Data = make(map[string][]byte) intent.ObjectMeta.OwnerReferences = existing.ObjectMeta.OwnerReferences @@ -104,8 +92,8 @@ func (r *Reconciler) reconcileRootCertificate( return root, err } -// +kubebuilder:rbac:groups="",resources=secrets,verbs=get -// +kubebuilder:rbac:groups="",resources=secrets,verbs=create;patch +// +kubebuilder:rbac:groups="",resources="secrets",verbs={get} +// +kubebuilder:rbac:groups="",resources="secrets",verbs={create,patch} // reconcileClusterCertificate first checks if a custom certificate // secret is configured. If so, that secret projection is returned. @@ -117,10 +105,11 @@ func (r *Reconciler) reconcileRootCertificate( // tls.crt, tls.key and ca.crt which are the TLS certificate, private key // and CA certificate, respectively. 
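These pki.go hunks replace the ParseCertificate/ParsePrivateKey and RootCAIsBad/LeafCertIsBad helpers with a load-or-regenerate pattern: unmarshal whatever the Secret holds, and mint a new certificate only when validation fails. A minimal sketch of that pattern, using hypothetical helper names and assuming this package's existing imports:

// loadOrCreateRoot ignores unmarshal errors on purpose: a partially loaded
// root simply fails RootIsValid and is replaced.
func loadOrCreateRoot(existing *corev1.Secret) (*pki.RootCertificateAuthority, error) {
	root := &pki.RootCertificateAuthority{}
	_ = root.Certificate.UnmarshalText(existing.Data["root.crt"])
	_ = root.PrivateKey.UnmarshalText(existing.Data["root.key"])

	if pki.RootIsValid(root) {
		return root, nil
	}
	return pki.NewRootCertificateAuthority()
}

// loadOrRegenerateLeaf is the leaf counterpart used by the hunks that follow,
// where dnsNames[0] is the FQDN that also becomes the certificate's CommonName.
func loadOrRegenerateLeaf(root *pki.RootCertificateAuthority, existing *corev1.Secret,
	dnsNames []string) (*pki.LeafCertificate, error) {
	leaf := &pki.LeafCertificate{}
	_ = leaf.Certificate.UnmarshalText(existing.Data["tls.crt"])
	_ = leaf.PrivateKey.UnmarshalText(existing.Data["tls.key"])
	return root.RegenerateLeafWhenNecessary(leaf, dnsNames[0], dnsNames)
}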
func (r *Reconciler) reconcileClusterCertificate( - ctx context.Context, rootCACert *pki.RootCertificateAuthority, - cluster *v1beta1.PostgresCluster, + ctx context.Context, root *pki.RootCertificateAuthority, + cluster *v1beta1.PostgresCluster, primaryService *corev1.Service, + replicaService *corev1.Service, ) ( - *v1.SecretProjection, error, + *corev1.SecretProjection, error, ) { // if a custom postgrescluster secret is provided, just return it if cluster.Spec.CustomTLSSecret != nil { @@ -129,33 +118,27 @@ func (r *Reconciler) reconcileClusterCertificate( const keyCertificate, keyPrivateKey, rootCA = "tls.crt", "tls.key", "ca.crt" - existing := &v1.Secret{ObjectMeta: naming.PostgresTLSSecret(cluster)} + existing := &corev1.Secret{ObjectMeta: naming.PostgresTLSSecret(cluster)} err := errors.WithStack(client.IgnoreNotFound( r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) - leaf := pki.NewLeafCertificate("", nil, nil) - // TODO(tjmoore4): currently set to the primary service, but this will likely - // need to be adjusted when server verification and/or client certificate - // authentication is added - leaf.DNSNames = []string{naming.ClusterPrimaryService(cluster).Name} - leaf.CommonName = leaf.DNSNames[0] + leaf := &pki.LeafCertificate{} + dnsNames := append(naming.ServiceDNSNames(ctx, primaryService), naming.ServiceDNSNames(ctx, replicaService)...) + dnsFQDN := dnsNames[0] - if data, ok := existing.Data[keyCertificate]; err == nil && ok { - leaf.Certificate, err = pki.ParseCertificate(data) - err = errors.WithStack(err) - } - if data, ok := existing.Data[keyPrivateKey]; err == nil && ok { - leaf.PrivateKey, err = pki.ParsePrivateKey(data) - err = errors.WithStack(err) - } + if err == nil { + // Unmarshal and validate the stored leaf. These first errors can + // be ignored because they result in an invalid leaf which is then + // correctly regenerated. + _ = leaf.Certificate.UnmarshalText(existing.Data[keyCertificate]) + _ = leaf.PrivateKey.UnmarshalText(existing.Data[keyPrivateKey]) - // if there is an error or the leaf certificate is bad, generate a new one - if err != nil || pki.LeafCertIsBad(ctx, leaf, rootCACert, cluster.Namespace) { - err = errors.WithStack(leaf.Generate(rootCACert)) + leaf, err = root.RegenerateLeafWhenNecessary(leaf, dnsFQDN, dnsNames) + err = errors.WithStack(err) } - intent := &v1.Secret{ObjectMeta: naming.PostgresTLSSecret(cluster)} - intent.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Secret")) + intent := &corev1.Secret{ObjectMeta: naming.PostgresTLSSecret(cluster)} + intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) intent.Data = make(map[string][]byte) intent.ObjectMeta.OwnerReferences = existing.ObjectMeta.OwnerReferences @@ -180,7 +163,7 @@ func (r *Reconciler) reconcileClusterCertificate( err = errors.WithStack(err) } if err == nil { - intent.Data[rootCA], err = rootCACert.Certificate.MarshalText() + intent.Data[rootCA], err = root.Certificate.MarshalText() err = errors.WithStack(err) } @@ -195,8 +178,8 @@ func (r *Reconciler) reconcileClusterCertificate( return clusterCertSecretProjection(intent), err } -// +kubebuilder:rbac:groups="",resources=secrets,verbs=get -// +kubebuilder:rbac:groups="",resources=secrets,verbs=create;patch +// +kubebuilder:rbac:groups="",resources="secrets",verbs={get} +// +kubebuilder:rbac:groups="",resources="secrets",verbs={create,patch} // instanceCertificate populates intent with the DNS leaf certificate and // returns it. 
It also ensures the leaf certificate, stored in the relevant @@ -208,31 +191,29 @@ func (r *Reconciler) reconcileClusterCertificate( // using the current root certificate func (*Reconciler) instanceCertificate( ctx context.Context, instance *appsv1.StatefulSet, - existing, intent *v1.Secret, rootCACert *pki.RootCertificateAuthority, + existing, intent *corev1.Secret, root *pki.RootCertificateAuthority, ) ( *pki.LeafCertificate, error, ) { var err error const keyCertificate, keyPrivateKey = "dns.crt", "dns.key" + leaf := &pki.LeafCertificate{} + // RFC 2818 states that the certificate DNS names must be used to verify // HTTPS identity. - leaf := pki.NewLeafCertificate("", nil, nil) - leaf.DNSNames = naming.InstancePodDNSNames(ctx, instance) - leaf.CommonName = leaf.DNSNames[0] // FQDN + dnsNames := naming.InstancePodDNSNames(ctx, instance) + dnsFQDN := dnsNames[0] - if data, ok := existing.Data[keyCertificate]; err == nil && ok { - leaf.Certificate, err = pki.ParseCertificate(data) - err = errors.WithStack(err) - } - if data, ok := existing.Data[keyPrivateKey]; err == nil && ok { - leaf.PrivateKey, err = pki.ParsePrivateKey(data) - err = errors.WithStack(err) - } + if err == nil { + // Unmarshal and validate the stored leaf. These first errors can + // be ignored because they result in an invalid leaf which is then + // correctly regenerated. + _ = leaf.Certificate.UnmarshalText(existing.Data[keyCertificate]) + _ = leaf.PrivateKey.UnmarshalText(existing.Data[keyPrivateKey]) - // if there is an error or the leaf certificate is bad, generate a new one - if err != nil || pki.LeafCertIsBad(ctx, leaf, rootCACert, instance.Namespace) { - err = errors.WithStack(leaf.Generate(rootCACert)) + leaf, err = root.RegenerateLeafWhenNecessary(leaf, dnsFQDN, dnsNames) + err = errors.WithStack(err) } if err == nil { @@ -249,12 +230,12 @@ func (*Reconciler) instanceCertificate( // clusterCertSecretProjection returns a secret projection of the postgrescluster's // CA, key, and certificate to include in the instance configuration volume. -func clusterCertSecretProjection(certificate *v1.Secret) *v1.SecretProjection { - return &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{ +func clusterCertSecretProjection(certificate *corev1.Secret) *corev1.SecretProjection { + return &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ Name: certificate.Name, }, - Items: []v1.KeyToPath{ + Items: []corev1.KeyToPath{ { Key: clusterCertFile, Path: clusterCertFile, diff --git a/internal/controller/postgrescluster/pki_test.go b/internal/controller/postgrescluster/pki_test.go index d35a54e30f..c2fe7af82a 100644 --- a/internal/controller/postgrescluster/pki_test.go +++ b/internal/controller/postgrescluster/pki_test.go @@ -1,65 +1,46 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + package postgrescluster import ( - "bytes" "context" "fmt" "os" "reflect" "strings" "testing" - "time" "github.com/pkg/errors" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/wait" - "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pki" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +// TestReconcileCerts tests the proper reconciliation of the root ca certificate +// secret, leaf certificate secrets and the updates that occur when updates are +// made to the cluster certificates generally. For the removal of ownership +// references and deletion of the root CA cert secret, a separate Kuttl test is +// used due to the need for proper garbage collection. func TestReconcileCerts(t *testing.T) { // Garbage collector cleans up test resources before the test completes if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { t.Skip("USE_EXISTING_CLUSTER: Test fails due to garbage collection") } - // setup the test environment and ensure a clean teardown - tEnv, tClient, _ := setupTestEnv(t, ControllerName) + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 2) ctx := context.Background() - // set namespace name - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { - assert.Check(t, tClient.Delete(ctx, ns)) - teardownTestEnv(t, tEnv) - }) - namespace := ns.Name + namespace := setupNamespace(t, tClient).Name r := &Reconciler{ Client: tClient, @@ -70,19 +51,9 @@ func TestReconcileCerts(t *testing.T) { clusterName1 := "hippocluster1" // set up test cluster1 - cluster1 := &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName1, - Namespace: namespace, - }, - Spec: v1beta1.PostgresClusterSpec{ - PostgresVersion: 12, - InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ - Name: "instance", - }}, - }, - } - cluster1.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("postgrescluster")) + cluster1 := testCluster() + cluster1.Name = clusterName1 + cluster1.Namespace = namespace if err := tClient.Create(ctx, cluster1); err != nil { t.Error(err) } @@ -90,31 +61,29 @@ func TestReconcileCerts(t *testing.T) { // set up test cluster2 cluster2Name := "hippocluster2" - cluster2 := &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: cluster2Name, - Namespace: namespace, - }, - Spec: v1beta1.PostgresClusterSpec{ - PostgresVersion: 12, - InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ - Name: "instance", - }}, - }, - } - cluster2.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("postgrescluster")) + cluster2 := testCluster() + cluster2.Name = cluster2Name + cluster2.Namespace = namespace if err := tClient.Create(ctx, cluster2); err != nil { t.Error(err) } + primaryService := new(corev1.Service) + primaryService.Namespace = namespace + primaryService.Name = "the-primary" + + replicaService := new(corev1.Service) + replicaService.Namespace = namespace + replicaService.Name = "the-replicas" + t.Run("check root certificate reconciliation", func(t *testing.T) { initialRoot, err := 
r.reconcileRootCertificate(ctx, cluster1) assert.NilError(t, err) - rootSecret := &v1.Secret{} + rootSecret := &corev1.Secret{} rootSecret.Namespace, rootSecret.Name = namespace, naming.RootCertSecret - rootSecret.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Secret")) + rootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) t.Run("check root CA secret first owner reference", func(t *testing.T) { @@ -160,52 +129,20 @@ func TestReconcileCerts(t *testing.T) { } }) - t.Run("remove owner references after deleting first cluster", func(t *testing.T) { - - if !strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { - t.Skip("requires a running garbage collection controller") - } - - err = tClient.Get(ctx, client.ObjectKeyFromObject(cluster1), cluster1) - assert.NilError(t, err) - - err = tClient.Delete(ctx, cluster1) - assert.NilError(t, err) - - err = wait.Poll(time.Second/2, Scale(time.Second*15), func() (bool, error) { - err := tClient.Get(ctx, client.ObjectKeyFromObject(rootSecret), rootSecret) - return len(rootSecret.ObjectMeta.OwnerReferences) == 1, err - }) - assert.NilError(t, err) - - assert.Check(t, len(rootSecret.ObjectMeta.OwnerReferences) == 1, "owner reference not removed") - - expectedOR := metav1.OwnerReference{ - APIVersion: "postgres-operator.crunchydata.com/v1beta1", - Kind: "PostgresCluster", - Name: "hippocluster2", - UID: cluster2.UID, - } - - if len(rootSecret.ObjectMeta.OwnerReferences) > 0 { - assert.Equal(t, rootSecret.ObjectMeta.OwnerReferences[0], expectedOR) - } - }) - t.Run("root certificate is returned correctly", func(t *testing.T) { fromSecret, err := getCertFromSecret(ctx, tClient, naming.RootCertSecret, namespace, "root.crt") assert.NilError(t, err) // assert returned certificate matches the one created earlier - assert.Assert(t, bytes.Equal(fromSecret.Certificate, initialRoot.Certificate.Certificate)) + assert.DeepEqual(t, *fromSecret, initialRoot.Certificate) }) t.Run("root certificate changes", func(t *testing.T) { // force the generation of a new root cert // create an empty secret and apply the change - emptyRootSecret := &v1.Secret{} - emptyRootSecret.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Secret")) + emptyRootSecret := &corev1.Secret{} + emptyRootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) emptyRootSecret.Namespace, emptyRootSecret.Name = namespace, naming.RootCertSecret emptyRootSecret.Data = make(map[string][]byte) err = errors.WithStack(r.apply(ctx, emptyRootSecret)) @@ -219,32 +156,10 @@ func TestReconcileCerts(t *testing.T) { assert.NilError(t, err) // check that the cert from the secret does not equal the initial certificate - assert.Assert(t, !bytes.Equal(fromSecret.Certificate, initialRoot.Certificate.Certificate)) + assert.Assert(t, !fromSecret.Equal(initialRoot.Certificate)) // check that the returned cert matches the cert from the secret - assert.Assert(t, bytes.Equal(fromSecret.Certificate, returnedRoot.Certificate.Certificate)) - }) - - t.Run("root CA secret is deleted after final cluster is deleted", func(t *testing.T) { - - if !strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { - t.Skip("requires a running garbage collection controller") - } - - err = tClient.Get(ctx, client.ObjectKeyFromObject(cluster2), cluster2) - assert.NilError(t, err) - - err = tClient.Delete(ctx, cluster2) - assert.NilError(t, err) - - err = wait.Poll(time.Second/2, Scale(time.Second*15), func() (bool, error) { - if err := tClient.Get(ctx, - 
client.ObjectKeyFromObject(rootSecret), rootSecret); apierrors.ReasonForError(err) == metav1.StatusReasonNotFound { - return true, err - } - return false, nil - }) - assert.Assert(t, apierrors.IsNotFound(err)) + assert.DeepEqual(t, *fromSecret, returnedRoot.Certificate) }) }) @@ -269,30 +184,26 @@ func TestReconcileCerts(t *testing.T) { }, } - intent, existing, err := createInstanceSecrets(ctx, tClient, instance, initialRoot) - - // apply the secret changes - err = errors.WithStack(r.apply(ctx, existing)) - assert.NilError(t, err) - - initialLeafCert, err := r.instanceCertificate(ctx, instance, existing, intent, initialRoot) - assert.NilError(t, err) - t.Run("check leaf certificate in secret", func(t *testing.T) { + existing := &corev1.Secret{Data: make(map[string][]byte)} + intent := &corev1.Secret{Data: make(map[string][]byte)} - fromSecret, err := getCertFromSecret(ctx, tClient, instance.GetName()+"-certs", namespace, "dns.crt") + initialLeafCert, err := r.instanceCertificate(ctx, instance, existing, intent, initialRoot) assert.NilError(t, err) - // assert returned certificate matches the one created earlier - assert.Assert(t, bytes.Equal(fromSecret.Certificate, initialLeafCert.Certificate.Certificate)) + fromSecret := &pki.LeafCertificate{} + assert.NilError(t, fromSecret.Certificate.UnmarshalText(intent.Data["dns.crt"])) + assert.NilError(t, fromSecret.PrivateKey.UnmarshalText(intent.Data["dns.key"])) + + assert.DeepEqual(t, fromSecret, initialLeafCert) }) t.Run("check that the leaf certs update when root changes", func(t *testing.T) { // force the generation of a new root cert // create an empty secret and apply the change - emptyRootSecret := &v1.Secret{} - emptyRootSecret.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Secret")) + emptyRootSecret := &corev1.Secret{} + emptyRootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) emptyRootSecret.Namespace, emptyRootSecret.Name = namespace, naming.RootCertSecret emptyRootSecret.Data = make(map[string][]byte) err = errors.WithStack(r.apply(ctx, emptyRootSecret)) @@ -301,36 +212,25 @@ func TestReconcileCerts(t *testing.T) { newRootCert, err := r.reconcileRootCertificate(ctx, cluster1) assert.NilError(t, err) - // get the existing leaf/instance secret which will receive a new certificate during reconciliation - existingInstanceSecret := &v1.Secret{} - assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ - Name: instance.GetName() + "-certs", - Namespace: namespace, - }, existingInstanceSecret)) - - // create an empty 'intent' secret for the reconcile function - instanceIntentSecret := &v1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} - instanceIntentSecret.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Secret")) - instanceIntentSecret.Type = v1.SecretTypeOpaque - instanceIntentSecret.Data = make(map[string][]byte) + existing := &corev1.Secret{Data: make(map[string][]byte)} + intent := &corev1.Secret{Data: make(map[string][]byte)} - // save a copy of the 'pre-reconciled' certificate - oldLeafFromSecret, err := getCertFromSecret(ctx, tClient, instance.GetName()+"-certs", namespace, "dns.crt") + initialLeaf, err := r.instanceCertificate(ctx, instance, existing, intent, initialRoot) assert.NilError(t, err) // reconcile the certificate - newLeaf, err := r.instanceCertificate(ctx, instance, existingInstanceSecret, instanceIntentSecret, newRootCert) + newLeaf, err := r.instanceCertificate(ctx, instance, existing, intent, newRootCert) assert.NilError(t, err) // assert old leaf cert does not match 
the newly reconciled one - assert.Assert(t, !bytes.Equal(oldLeafFromSecret.Certificate, newLeaf.Certificate.Certificate)) + assert.Assert(t, !initialLeaf.Certificate.Equal(newLeaf.Certificate)) // 'reconcile' the certificate when the secret does not change. The returned leaf certificate should not change - newLeaf2, err := r.instanceCertificate(ctx, instance, instanceIntentSecret, instanceIntentSecret, newRootCert) + newLeaf2, err := r.instanceCertificate(ctx, instance, intent, intent, newRootCert) assert.NilError(t, err) // check that the leaf cert did not change after another reconciliation - assert.Assert(t, bytes.Equal(newLeaf2.Certificate.Certificate, newLeaf.Certificate.Certificate)) + assert.DeepEqual(t, newLeaf2, newLeaf) }) @@ -338,11 +238,11 @@ func TestReconcileCerts(t *testing.T) { t.Run("check cluster certificate secret reconciliation", func(t *testing.T) { // example auto-generated secret projection - testSecretProjection := &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{ + testSecretProjection := &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ Name: fmt.Sprintf(naming.ClusterCertSecret, cluster1.Name), }, - Items: []v1.KeyToPath{ + Items: []corev1.KeyToPath{ { Key: clusterCertFile, Path: clusterCertFile, @@ -359,11 +259,11 @@ func TestReconcileCerts(t *testing.T) { } // example custom secret projection - customSecretProjection := &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{ + customSecretProjection := &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ Name: "customsecret", }, - Items: []v1.KeyToPath{ + Items: []corev1.KeyToPath{ { Key: clusterCertFile, Path: clusterCertFile, @@ -385,14 +285,14 @@ func TestReconcileCerts(t *testing.T) { assert.NilError(t, err) t.Run("check standard secret projection", func(t *testing.T) { - secretCertProj, err := r.reconcileClusterCertificate(ctx, initialRoot, cluster1) + secretCertProj, err := r.reconcileClusterCertificate(ctx, initialRoot, cluster1, primaryService, replicaService) assert.NilError(t, err) assert.DeepEqual(t, testSecretProjection, secretCertProj) }) t.Run("check custom secret projection", func(t *testing.T) { - customSecretCertProj, err := r.reconcileClusterCertificate(ctx, initialRoot, cluster2) + customSecretCertProj, err := r.reconcileClusterCertificate(ctx, initialRoot, cluster2, primaryService, replicaService) assert.NilError(t, err) assert.DeepEqual(t, customSecretProjection, customSecretCertProj) @@ -400,7 +300,7 @@ func TestReconcileCerts(t *testing.T) { t.Run("check switch to a custom secret projection", func(t *testing.T) { // simulate a new custom secret - testSecret := &v1.Secret{} + testSecret := &corev1.Secret{} testSecret.Namespace, testSecret.Name = namespace, "newcustomsecret" // simulate cluster spec update cluster2.Spec.CustomTLSSecret.LocalObjectReference.Name = "newcustomsecret" @@ -409,7 +309,7 @@ func TestReconcileCerts(t *testing.T) { testSecretProjection := clusterCertSecretProjection(testSecret) // reconcile the secret project using the normal process - customSecretCertProj, err := r.reconcileClusterCertificate(ctx, initialRoot, cluster2) + customSecretCertProj, err := r.reconcileClusterCertificate(ctx, initialRoot, cluster2, primaryService, replicaService) assert.NilError(t, err) // results should be the same @@ -418,7 +318,7 @@ func TestReconcileCerts(t *testing.T) { t.Run("check cluster certificate secret", func(t *testing.T) { // get the cluster cert secret - initialClusterCertSecret := 
&v1.Secret{} + initialClusterCertSecret := &corev1.Secret{} err := tClient.Get(ctx, types.NamespacedName{ Name: fmt.Sprintf(naming.ClusterCertSecret, cluster1.Name), Namespace: namespace, @@ -427,8 +327,8 @@ func TestReconcileCerts(t *testing.T) { // force the generation of a new root cert // create an empty secret and apply the change - emptyRootSecret := &v1.Secret{} - emptyRootSecret.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Secret")) + emptyRootSecret := &corev1.Secret{} + emptyRootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) emptyRootSecret.Namespace, emptyRootSecret.Name = namespace, naming.RootCertSecret emptyRootSecret.Data = make(map[string][]byte) err = errors.WithStack(r.apply(ctx, emptyRootSecret)) @@ -439,11 +339,11 @@ func TestReconcileCerts(t *testing.T) { assert.NilError(t, err) // pass in the new root, which should result in a new cluster cert - _, err = r.reconcileClusterCertificate(ctx, returnedRoot, cluster1) + _, err = r.reconcileClusterCertificate(ctx, returnedRoot, cluster1, primaryService, replicaService) assert.NilError(t, err) // get the new cluster cert secret - newClusterCertSecret := &v1.Secret{} + newClusterCertSecret := &corev1.Secret{} err = tClient.Get(ctx, types.NamespacedName{ Name: fmt.Sprintf(naming.ClusterCertSecret, cluster1.Name), Namespace: namespace, @@ -451,6 +351,27 @@ func TestReconcileCerts(t *testing.T) { assert.NilError(t, err) assert.Assert(t, !reflect.DeepEqual(initialClusterCertSecret, newClusterCertSecret)) + + leaf := &pki.LeafCertificate{} + assert.NilError(t, leaf.Certificate.UnmarshalText(newClusterCertSecret.Data["tls.crt"])) + assert.NilError(t, leaf.PrivateKey.UnmarshalText(newClusterCertSecret.Data["tls.key"])) + + assert.Assert(t, + strings.HasPrefix(leaf.Certificate.CommonName(), "the-primary."+namespace+".svc."), + "got %q", leaf.Certificate.CommonName()) + + if dnsNames := leaf.Certificate.DNSNames(); assert.Check(t, len(dnsNames) > 1) { + assert.DeepEqual(t, dnsNames[1:4], []string{ + "the-primary." + namespace + ".svc", + "the-primary." + namespace, + "the-primary", + }) + assert.DeepEqual(t, dnsNames[5:8], []string{ + "the-replicas." + namespace + ".svc", + "the-replicas." 
+ namespace, + "the-replicas", + }) + } }) }) } @@ -460,7 +381,7 @@ func getCertFromSecret( ctx context.Context, tClient client.Client, name, namespace, dataKey string, ) (*pki.Certificate, error) { // get cert secret - secret := &v1.Secret{} + secret := &corev1.Secret{} if err := tClient.Get(ctx, types.NamespacedName{ Name: name, Namespace: namespace, @@ -475,56 +396,6 @@ func getCertFromSecret( } // parse the cert from binary encoded data - if fromSecret, err := pki.ParseCertificate(secretCRT); fromSecret == nil || err != nil { - return nil, fmt.Errorf("error parsing %s", dataKey) - } else { - return fromSecret, nil - } -} - -// createInstanceSecrets creates the two initial leaf instance secrets for use when -// testing the leaf cert reconciliation -func createInstanceSecrets( - ctx context.Context, tClient client.Client, instance *appsv1.StatefulSet, - rootCA *pki.RootCertificateAuthority, -) (*v1.Secret, *v1.Secret, error) { - // create two secret structs for reconciliation - intent := &v1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} - existing := &v1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} - - // populate the 'intent' secret - err := errors.WithStack(client.IgnoreNotFound( - tClient.Get(ctx, client.ObjectKeyFromObject(intent), intent))) - intent.Data = make(map[string][]byte) - if err != nil { - return intent, existing, err - } - - // generate a leaf cert for the 'existing' secret - leafCert := pki.NewLeafCertificate("", nil, nil) - leafCert.DNSNames = naming.InstancePodDNSNames(ctx, instance) - leafCert.CommonName = leafCert.DNSNames[0] // FQDN - err = errors.WithStack(leafCert.Generate(rootCA)) - if err != nil { - return intent, existing, err - } - - // populate the 'existing' secret - existing.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Secret")) - existing.Data = make(map[string][]byte) - - if err == nil { - existing.Data["dns.crt"], err = leafCert.Certificate.MarshalText() - err = errors.WithStack(err) - } - if err != nil { - return intent, existing, err - } - - if err == nil { - existing.Data["dns.key"], err = leafCert.PrivateKey.MarshalText() - err = errors.WithStack(err) - } - - return intent, existing, err + fromSecret := &pki.Certificate{} + return fromSecret, fromSecret.UnmarshalText(secretCRT) } diff --git a/internal/controller/postgrescluster/pod_disruption_budget.go b/internal/controller/postgrescluster/pod_disruption_budget.go new file mode 100644 index 0000000000..4bff4a9743 --- /dev/null +++ b/internal/controller/postgrescluster/pod_disruption_budget.go @@ -0,0 +1,68 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package postgrescluster + +// Note: The behavior for an empty selector differs between the +// policy/v1beta1 and policy/v1 APIs for PodDisruptionBudgets. For +// policy/v1beta1 an empty selector matches zero pods, while for +// policy/v1 an empty selector matches every pod in the namespace. 
+// https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget +import ( + "github.com/pkg/errors" + policyv1 "k8s.io/api/policy/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// generatePodDisruptionBudget takes parameters required to fill out a PDB and +// returns the PDB +func (r *Reconciler) generatePodDisruptionBudget( + cluster *v1beta1.PostgresCluster, + meta metav1.ObjectMeta, + minAvailable *intstr.IntOrString, + selector metav1.LabelSelector, +) (*policyv1.PodDisruptionBudget, error) { + pdb := &policyv1.PodDisruptionBudget{ + ObjectMeta: meta, + Spec: policyv1.PodDisruptionBudgetSpec{ + MinAvailable: minAvailable, + Selector: &selector, + }, + } + pdb.SetGroupVersionKind(policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudget")) + err := errors.WithStack(r.setControllerReference(cluster, pdb)) + return pdb, err +} + +// getMinAvailable contains logic to either parse a user provided IntOrString +// value or determine a default minimum available based on replicas. In both +// cases it returns the minAvailable as an int32 that should be set on a +// PodDisruptionBudget +func getMinAvailable( + minAvailable *intstr.IntOrString, + replicas int32, +) *intstr.IntOrString { + // TODO: Webhook Validation for minAvailable in the spec + // - MinAvailable should be less than replicas + // - MinAvailable as a string value should be a percentage string <= 100% + if minAvailable != nil { + return minAvailable + } + + // If the user does not provide 'minAvailable', we will set a default + // based on the number of replicas. + var expect int32 + + // We default to '1' if they have more than one replica defined. + if replicas > 1 { + expect = 1 + } + + // If more than one replica is not defined, we will default to '0' + return initialize.Pointer(intstr.FromInt32(expect)) +} diff --git a/internal/controller/postgrescluster/pod_disruption_budget_test.go b/internal/controller/postgrescluster/pod_disruption_budget_test.go new file mode 100644 index 0000000000..55e2bb63c6 --- /dev/null +++ b/internal/controller/postgrescluster/pod_disruption_budget_test.go @@ -0,0 +1,107 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package postgrescluster + +import ( + "testing" + + "gotest.tools/v3/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestGeneratePodDisruptionBudget(t *testing.T) { + _, cc := setupKubernetes(t) + r := &Reconciler{Client: cc} + require.ParallelCapacity(t, 0) + + var ( + minAvailable *intstr.IntOrString + selector metav1.LabelSelector + ) + + t.Run("empty", func(t *testing.T) { + // If empty values are passed into the function does it blow up + _, err := r.generatePodDisruptionBudget( + &v1beta1.PostgresCluster{}, + metav1.ObjectMeta{}, + minAvailable, + selector, + ) + assert.NilError(t, err) + }) + + t.Run("valid", func(t *testing.T) { + cluster := testCluster() + meta := metav1.ObjectMeta{ + Name: "test-pdb", + Namespace: "test-ns", + Labels: map[string]string{ + "label-key": "label-value", + }, + Annotations: map[string]string{ + "anno-key": "anno-value", + }, + } + minAvailable = initialize.Pointer(intstr.FromInt32(1)) + selector := metav1.LabelSelector{ + MatchLabels: map[string]string{ + "key": "value", + }, + } + pdb, err := r.generatePodDisruptionBudget( + cluster, + meta, + minAvailable, + selector, + ) + assert.NilError(t, err) + assert.Equal(t, pdb.Name, meta.Name) + assert.Equal(t, pdb.Namespace, meta.Namespace) + assert.Assert(t, labels.Set(pdb.Labels).Has("label-key")) + assert.Assert(t, labels.Set(pdb.Annotations).Has("anno-key")) + assert.Equal(t, pdb.Spec.MinAvailable, minAvailable) + assert.DeepEqual(t, pdb.Spec.Selector.MatchLabels, map[string]string{ + "key": "value", + }) + assert.Assert(t, metav1.IsControlledBy(pdb, cluster)) + }) +} + +func TestGetMinAvailable(t *testing.T) { + t.Run("minAvailable provided", func(t *testing.T) { + // minAvailable is defined so use that value + ma := initialize.Pointer(intstr.FromInt32(0)) + expect := getMinAvailable(ma, 1) + assert.Equal(t, *expect, intstr.FromInt(0)) + + ma = initialize.Pointer(intstr.FromInt32(1)) + expect = getMinAvailable(ma, 2) + assert.Equal(t, *expect, intstr.FromInt(1)) + + ma = initialize.Pointer(intstr.FromString("50%")) + expect = getMinAvailable(ma, 3) + assert.Equal(t, *expect, intstr.FromString("50%")) + + ma = initialize.Pointer(intstr.FromString("200%")) + expect = getMinAvailable(ma, 2147483647) + assert.Equal(t, *expect, intstr.FromString("200%")) + }) + + // When minAvailable is not defined we need to decide what value to use + t.Run("defaulting logic", func(t *testing.T) { + // When we have one replica minAvailable should be 0 + expect := getMinAvailable(nil, 1) + assert.Equal(t, *expect, intstr.FromInt(0)) + // When we have more than one replica minAvailable should be 1 + expect = getMinAvailable(nil, 2) + assert.Equal(t, *expect, intstr.FromInt(1)) + }) +} diff --git a/internal/controller/postgrescluster/postgres.go b/internal/controller/postgrescluster/postgres.go index db100dd6a3..312079d824 100644 --- a/internal/controller/postgrescluster/postgres.go +++ b/internal/controller/postgrescluster/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -23,20 +12,24 @@ import ( "net" "net/url" "regexp" + "sort" "strings" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/pgaudit" + "github.com/crunchydata/postgres-operator/internal/postgis" "github.com/crunchydata/postgres-operator/internal/postgres" pgpassword "github.com/crunchydata/postgres-operator/internal/postgres/password" "github.com/crunchydata/postgres-operator/internal/util" @@ -52,7 +45,7 @@ func (r *Reconciler) generatePostgresUserSecret( username := string(spec.Name) intent := &corev1.Secret{ObjectMeta: naming.PostgresUserSecret(cluster, username)} intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) - initialize.ByteMap(&intent.Data) + initialize.Map(&intent.Data) // Populate the Secret with libpq keywords for connecting through // the primary Service. @@ -65,27 +58,41 @@ func (r *Reconciler) generatePostgresUserSecret( intent.Data["port"] = []byte(port) intent.Data["user"] = []byte(username) - // Use the existing password and verifier. Generate both when either is missing. + // Use the existing password and verifier. if existing != nil { intent.Data["password"] = existing.Data["password"] intent.Data["verifier"] = existing.Data["verifier"] } - if len(intent.Data["password"]) == 0 || len(intent.Data["verifier"]) == 0 { - password, err := util.GeneratePassword(util.DefaultGeneratedPasswordLength) + + // When password is unset, generate a new one according to the specified policy. + if len(intent.Data["password"]) == 0 { + // NOTE: The tests around ASCII passwords are lacking. When changing + // this, make sure that ASCII is the default. + generate := util.GenerateASCIIPassword + if spec.Password != nil { + switch spec.Password.Type { + case v1beta1.PostgresPasswordTypeAlphaNumeric: + generate = util.GenerateAlphaNumericPassword + } + } + + password, err := generate(util.DefaultGeneratedPasswordLength) if err != nil { return nil, errors.WithStack(err) } + intent.Data["password"] = []byte(password) + intent.Data["verifier"] = nil + } - // Generate the SCRAM verifier now and store alongside the plaintext - // password so that later reconciles don't generate it repeatedly. - // NOTE(cbandy): We don't have a function to compare a plaintext - // password to a SCRAM verifier. - verifier, err := pgpassword.NewSCRAMPassword(password).Build() + // When a password has been generated or the verifier is empty, + // generate a verifier based on the current password. 
+ // NOTE(cbandy): We don't have a function to compare a plaintext + // password to a SCRAM verifier. + if len(intent.Data["verifier"]) == 0 { + verifier, err := pgpassword.NewSCRAMPassword(string(intent.Data["password"])).Build() if err != nil { return nil, errors.WithStack(err) } - - intent.Data["password"] = []byte(password) intent.Data["verifier"] = []byte(verifier) } @@ -101,6 +108,18 @@ func (r *Reconciler) generatePostgresUserSecret( Host: net.JoinHostPort(hostname, port), Path: database, }).String()) + + // The JDBC driver requires a different URI scheme and query component. + // - https://jdbc.postgresql.org/documentation/use/#connection-parameters + query := url.Values{} + query.Set("user", username) + query.Set("password", string(intent.Data["password"])) + intent.Data["jdbc-uri"] = []byte((&url.URL{ + Scheme: "jdbc:postgresql", + Host: net.JoinHostPort(hostname, port), + Path: database, + RawQuery: query.Encode(), + }).String()) } // When PgBouncer is enabled, include values for connecting through it. @@ -121,6 +140,22 @@ func (r *Reconciler) generatePostgresUserSecret( Host: net.JoinHostPort(hostname, port), Path: database, }).String()) + + // The JDBC driver requires a different URI scheme and query component. + // Disable prepared statements to be compatible with PgBouncer's + // transaction pooling. + // - https://jdbc.postgresql.org/documentation/use/#connection-parameters + // - https://www.pgbouncer.org/faq.html#how-to-use-prepared-statements-with-transaction-pooling + query := url.Values{} + query.Set("user", username) + query.Set("password", string(intent.Data["password"])) + query.Set("prepareThreshold", "0") + intent.Data["pgbouncer-jdbc-uri"] = []byte((&url.URL{ + Scheme: "jdbc:postgresql", + Host: net.JoinHostPort(hostname, port), + Path: database, + RawQuery: query.Encode(), + }).String()) } } @@ -147,34 +182,21 @@ func (r *Reconciler) reconcilePostgresDatabases( // Find the PostgreSQL instance that can execute SQL that writes system // catalogs. When there is none, return early. - - for _, instance := range instances.forCluster { - if terminating, known := instance.IsTerminating(); terminating || !known { - continue - } - if writable, known := instance.IsWritable(); !writable || !known { - continue - } - running, known := instance.IsRunning(container) - if running && known && len(instance.Pods) > 0 { - pod := instance.Pods[0] - ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) - - podExecutor = func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, - ) error { - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) - } - break - } - } - if podExecutor == nil { + pod, _ := instances.writablePod(container) + if pod == nil { return nil } + ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) + podExecutor = func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + } + // Gather the list of database that should exist in PostgreSQL. - databases := sets.String{} + databases := sets.Set[string]{} if cluster.Spec.Users == nil { // Users are unspecified; create one database matching the cluster name // if it is also a valid database name. @@ -199,12 +221,33 @@ func (r *Reconciler) reconcilePostgresDatabases( } } - // Calculate a hash of the SQL that should be executed in PostgreSQL. 
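For reference, a minimal, self-contained sketch of the URI shape the jdbc-uri and pgbouncer-jdbc-uri values assembled above take, with hypothetical host, database, and credential values (url.Values.Encode sorts the query keys):

package main

import (
	"fmt"
	"net"
	"net/url"
)

func main() {
	// Hypothetical connection details; the operator fills these in from the
	// generated user Secret and the PgBouncer service.
	query := url.Values{}
	query.Set("user", "hippo")
	query.Set("password", "datalake")
	query.Set("prepareThreshold", "0") // required for PgBouncer transaction pooling

	uri := (&url.URL{
		Scheme:   "jdbc:postgresql",
		Host:     net.JoinHostPort("hippo-pgbouncer.postgres-operator.svc", "5432"),
		Path:     "hippo",
		RawQuery: query.Encode(),
	}).String()

	// jdbc:postgresql://hippo-pgbouncer.postgres-operator.svc:5432/hippo?password=datalake&prepareThreshold=0&user=hippo
	fmt.Println(uri)
}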
- + var pgAuditOK, postgisInstallOK bool create := func(ctx context.Context, exec postgres.Executor) error { - return postgres.CreateDatabasesInPostgreSQL(ctx, exec, databases.List()) + if pgAuditOK = pgaudit.EnableInPostgreSQL(ctx, exec) == nil; !pgAuditOK { + // pgAudit can only be enabled after its shared library is loaded, + // but early versions of PGO do not load it automatically. Assume + // that an error here is because the cluster started during one of + // those versions and has not been restarted. + r.Recorder.Event(cluster, corev1.EventTypeWarning, "pgAuditDisabled", + "Unable to install pgAudit") + } + + // Enabling PostGIS extensions is a one-way operation + // e.g., you can take a PostgresCluster and turn it into a PostGISCluster, + // but you cannot reverse the process, as that would potentially remove an extension + // that is being used by some database/tables + if cluster.Spec.PostGISVersion == "" { + postgisInstallOK = true + } else if postgisInstallOK = postgis.EnableInPostgreSQL(ctx, exec) == nil; !postgisInstallOK { + // TODO(benjaminjb): Investigate under what conditions postgis would fail install + r.Recorder.Event(cluster, corev1.EventTypeWarning, "PostGISDisabled", + "Unable to install PostGIS") + } + + return postgres.CreateDatabasesInPostgreSQL(ctx, exec, sets.List(databases)) } + // Calculate a hash of the SQL that should be executed in PostgreSQL. revision, err := safeHash32(func(hasher io.Writer) error { // Discard log messages about executing SQL. return create(logging.NewContext(ctx, logging.Discard()), func( @@ -233,7 +276,7 @@ func (r *Reconciler) reconcilePostgresDatabases( log := logging.FromContext(ctx).WithValues("revision", revision) err = errors.WithStack(create(logging.NewContext(ctx, log), podExecutor)) } - if err == nil { + if err == nil && pgAuditOK && postgisInstallOK { cluster.Status.DatabaseRevision = revision } @@ -245,13 +288,56 @@ func (r *Reconciler) reconcilePostgresDatabases( func (r *Reconciler) reconcilePostgresUsers( ctx context.Context, cluster *v1beta1.PostgresCluster, instances *observedInstances, ) error { + r.validatePostgresUsers(cluster) + users, secrets, err := r.reconcilePostgresUserSecrets(ctx, cluster) if err == nil { err = r.reconcilePostgresUsersInPostgreSQL(ctx, cluster, instances, users, secrets) } + if err == nil { + // Copy PostgreSQL users and passwords into pgAdmin. This is here because + // reconcilePostgresUserSecrets is building a (default) PostgresUserSpec + // that is not in the PostgresClusterSpec. The freshly generated Secrets + // are available here, too. + err = r.reconcilePGAdminUsers(ctx, cluster, users, secrets) + } return err } +// validatePostgresUsers emits warnings when cluster.Spec.Users contains values +// that are no longer valid. 
NOTE(ratcheting) NOTE(validation) +// - https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-ratcheting +func (r *Reconciler) validatePostgresUsers(cluster *v1beta1.PostgresCluster) { + if len(cluster.Spec.Users) == 0 { + return + } + + path := field.NewPath("spec", "users") + reComments := regexp.MustCompile(`(?:--|/[*]|[*]/)`) + rePassword := regexp.MustCompile(`(?i:PASSWORD)`) + + for i := range cluster.Spec.Users { + errs := field.ErrorList{} + spec := cluster.Spec.Users[i] + + if reComments.MatchString(spec.Options) { + errs = append(errs, + field.Invalid(path.Index(i).Child("options"), spec.Options, + "cannot contain comments")) + } + if rePassword.MatchString(spec.Options) { + errs = append(errs, + field.Invalid(path.Index(i).Child("options"), spec.Options, + "cannot assign password")) + } + + if len(errs) > 0 { + r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidUser", + errs.ToAggregate().Error()) + } + } +} + // +kubebuilder:rbac:groups="",resources="secrets",verbs={list} // +kubebuilder:rbac:groups="",resources="secrets",verbs={create,delete,patch} @@ -316,6 +402,36 @@ func (r *Reconciler) reconcilePostgresUserSecrets( )) } + // Sorts the slice of secrets.Items based on secrets with identical labels + // If one secret has "pguser" in its name and the other does not, the + // one without "pguser" is moved to the front. + // If both secrets have "pguser" in their names or neither has "pguser", they + // are sorted by creation timestamp. + // If two secrets have the same creation timestamp, they are further sorted by name. + // The secret to be used by PGO is put at the end of the sorted slice. + sort.Slice(secrets.Items, func(i, j int) bool { + // Check if either secrets have "pguser" in their names + isIPgUser := strings.Contains(secrets.Items[i].Name, "pguser") + isJPgUser := strings.Contains(secrets.Items[j].Name, "pguser") + + // If one secret has "pguser" and the other does not, + // move the one without "pguser" to the front + if isIPgUser && !isJPgUser { + return false + } else if !isIPgUser && isJPgUser { + return true + } + + if secrets.Items[i].CreationTimestamp.Time.Equal(secrets.Items[j].CreationTimestamp.Time) { + // If the creation timestamps are equal, sort by name + return secrets.Items[i].Name < secrets.Items[j].Name + } + + // If both secrets have "pguser" or neither have "pguser", + // sort by creation timestamp + return secrets.Items[i].CreationTimestamp.Time.After(secrets.Items[j].CreationTimestamp.Time) + }) + // Index secrets by PostgreSQL user name and delete any that are not in the // cluster spec. Keep track of the deprecated default secret to migrate its // contents when the current secret doesn't exist. @@ -389,9 +505,9 @@ func (r *Reconciler) reconcilePostgresUsersInPostgreSQL( ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) podExecutor = func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) 
} break } @@ -408,7 +524,7 @@ func (r *Reconciler) reconcilePostgresUsersInPostgreSQL( } write := func(ctx context.Context, exec postgres.Executor) error { - return postgres.WriteUsersInPostgreSQL(ctx, exec, specUsers, verifiers) + return postgres.WriteUsersInPostgreSQL(ctx, cluster, exec, specUsers, verifiers) } revision, err := safeHash32(func(hasher io.Writer) error { @@ -446,14 +562,14 @@ func (r *Reconciler) reconcilePostgresUsersInPostgreSQL( return err } -// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=create;patch +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,patch} // reconcilePostgresDataVolume writes the PersistentVolumeClaim for instance's // PostgreSQL data volume. func (r *Reconciler) reconcilePostgresDataVolume( ctx context.Context, cluster *v1beta1.PostgresCluster, instanceSpec *v1beta1.PostgresInstanceSetSpec, instance *appsv1.StatefulSet, - clusterVolumes []corev1.PersistentVolumeClaim, + clusterVolumes []corev1.PersistentVolumeClaim, sourceCluster *v1beta1.PostgresCluster, ) (*corev1.PersistentVolumeClaim, error) { labelMap := map[string]string{ @@ -461,9 +577,10 @@ func (r *Reconciler) reconcilePostgresDataVolume( naming.LabelInstanceSet: instanceSpec.Name, naming.LabelInstance: instance.Name, naming.LabelRole: naming.RolePostgresData, + naming.LabelData: naming.DataPostgres, } - var pvc *v1.PersistentVolumeClaim + var pvc *corev1.PersistentVolumeClaim existingPVCName, err := getPGPVCName(labelMap, clusterVolumes) if err != nil { return nil, errors.WithStack(err) @@ -493,6 +610,38 @@ func (r *Reconciler) reconcilePostgresDataVolume( pvc.Spec = instanceSpec.DataVolumeClaimSpec + // If a source cluster was provided and VolumeSnapshots are turned on in the source cluster and + // there is a VolumeSnapshot available for the source cluster that is ReadyToUse, use it as the + // source for the PVC. If there is an error when retrieving VolumeSnapshots, or no ReadyToUse + // snapshots were found, create a warning event, but continue creating PVC in the usual fashion. + if sourceCluster != nil && sourceCluster.Spec.Backups.Snapshots != nil && feature.Enabled(ctx, feature.VolumeSnapshots) { + snapshots, err := r.getSnapshotsForCluster(ctx, sourceCluster) + if err == nil { + snapshot := getLatestReadySnapshot(snapshots) + if snapshot != nil { + r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "BootstrappingWithSnapshot", + "Snapshot found for %v; bootstrapping cluster with snapshot.", sourceCluster.Name) + pvc.Spec.DataSource = &corev1.TypedLocalObjectReference{ + APIGroup: initialize.String("snapshot.storage.k8s.io"), + Kind: snapshot.Kind, + Name: snapshot.Name, + } + } else { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "SnapshotNotFound", + "No ReadyToUse snapshots were found for %v; proceeding with typical restore process.", sourceCluster.Name) + } + } else { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "SnapshotNotFound", + "Could not get snapshots for %v, proceeding with typical restore process.", sourceCluster.Name) + } + } + + r.setVolumeSize(ctx, cluster, pvc, instanceSpec.Name) + + // Clear any set limit before applying PVC. This is needed to allow the limit + // value to change later. 
+ pvc.Spec.Resources.Limits = nil + if err == nil { err = r.handlePersistentVolumeClaimError(cluster, errors.WithStack(r.apply(ctx, pvc))) @@ -501,8 +650,149 @@ func (r *Reconciler) reconcilePostgresDataVolume( return pvc, err } -// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get -// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=create;delete;patch +// setVolumeSize compares the potential sizes from the instance spec, status +// and limit and sets the appropriate current value. +func (r *Reconciler) setVolumeSize(ctx context.Context, cluster *v1beta1.PostgresCluster, + pvc *corev1.PersistentVolumeClaim, instanceSpecName string) { + log := logging.FromContext(ctx) + + // Store the limit for this instance set. This value will not change below. + volumeLimitFromSpec := pvc.Spec.Resources.Limits.Storage() + + // Capture the largest pgData volume size currently defined for a given instance set. + // This value will capture our desired update. + volumeRequestSize := pvc.Spec.Resources.Requests.Storage() + + // If the request value is greater than the set limit, use the limit and issue + // a warning event. A limit of 0 is ignorned. + if !volumeLimitFromSpec.IsZero() && + volumeRequestSize.Value() > volumeLimitFromSpec.Value() { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "VolumeRequestOverLimit", + "pgData volume request (%v) for %s/%s is greater than set limit (%v). Limit value will be used.", + volumeRequestSize, cluster.Name, instanceSpecName, volumeLimitFromSpec) + + pvc.Spec.Resources.Requests = corev1.ResourceList{ + corev1.ResourceStorage: *resource.NewQuantity(volumeLimitFromSpec.Value(), resource.BinarySI), + } + // Otherwise, if the limit is not set or the feature gate is not enabled, do not autogrow. + } else if !volumeLimitFromSpec.IsZero() && feature.Enabled(ctx, feature.AutoGrowVolumes) { + for i := range cluster.Status.InstanceSets { + if instanceSpecName == cluster.Status.InstanceSets[i].Name { + for _, dpv := range cluster.Status.InstanceSets[i].DesiredPGDataVolume { + if dpv != "" { + desiredRequest, err := resource.ParseQuantity(dpv) + if err == nil { + if desiredRequest.Value() > volumeRequestSize.Value() { + volumeRequestSize = &desiredRequest + } + } else { + log.Error(err, "Unable to parse volume request: "+dpv) + } + } + } + } + } + + // If the volume request size is greater than or equal to the limit and the + // limit is not zero, update the request size to the limit value. + // If the user manually requests a lower limit that is smaller than the current + // or requested volume size, it will be ignored in favor of the limit value. + if volumeRequestSize.Value() >= volumeLimitFromSpec.Value() { + + r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "VolumeLimitReached", + "pgData volume(s) for %s/%s are at size limit (%v).", cluster.Name, + instanceSpecName, volumeLimitFromSpec) + + // If the volume size request is greater than the limit, issue an + // additional event warning. 
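As a rough, self-contained illustration of the quantity comparisons setVolumeSize makes, with hypothetical request, desired, and limit values (the real method also records the warning and normal events described above):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	request := resource.MustParse("1Gi") // pvc.Spec.Resources.Requests.Storage()
	desired := resource.MustParse("2Gi") // cluster.Status.InstanceSets[].DesiredPGDataVolume
	limit := resource.MustParse("5Gi")   // pvc.Spec.Resources.Limits.Storage()

	if desired.Value() > request.Value() {
		request = desired // grow the request toward the observed desired size
	}
	if request.Value() >= limit.Value() {
		request = limit // never request more than the configured limit
	}
	fmt.Println(request.String()) // 2Gi
}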
+ if volumeRequestSize.Value() > volumeLimitFromSpec.Value() { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "DesiredVolumeAboveLimit", + "The desired size (%v) for the %s/%s pgData volume(s) is greater than the size limit (%v).", + volumeRequestSize, cluster.Name, instanceSpecName, volumeLimitFromSpec) + } + + volumeRequestSize = volumeLimitFromSpec + } + pvc.Spec.Resources.Requests = corev1.ResourceList{ + corev1.ResourceStorage: *resource.NewQuantity(volumeRequestSize.Value(), resource.BinarySI), + } + } +} + +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,patch} + +// reconcileTablespaceVolumes writes the PersistentVolumeClaims for instance's +// tablespace data volumes. +func (r *Reconciler) reconcileTablespaceVolumes( + ctx context.Context, cluster *v1beta1.PostgresCluster, + instanceSpec *v1beta1.PostgresInstanceSetSpec, instance *appsv1.StatefulSet, + clusterVolumes []corev1.PersistentVolumeClaim, +) (tablespaceVolumes []*corev1.PersistentVolumeClaim, err error) { + + if !feature.Enabled(ctx, feature.TablespaceVolumes) { + return + } + + if instanceSpec.TablespaceVolumes == nil { + return + } + + for _, vol := range instanceSpec.TablespaceVolumes { + labelMap := map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelInstanceSet: instanceSpec.Name, + naming.LabelInstance: instance.Name, + naming.LabelRole: "tablespace", + naming.LabelData: vol.Name, + } + + var pvc *corev1.PersistentVolumeClaim + existingPVCName, err := getPGPVCName(labelMap, clusterVolumes) + if err != nil { + return nil, errors.WithStack(err) + } + if existingPVCName != "" { + pvc = &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{ + Namespace: cluster.GetNamespace(), + Name: existingPVCName, + }} + } else { + pvc = &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstanceTablespaceDataVolume(instance, vol.Name)} + } + + pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim")) + + err = errors.WithStack(r.setControllerReference(cluster, pvc)) + + pvc.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + instanceSpec.Metadata.GetAnnotationsOrNil()) + + pvc.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + instanceSpec.Metadata.GetLabelsOrNil(), + labelMap, + ) + + pvc.Spec = vol.DataVolumeClaimSpec + + if err == nil { + err = r.handlePersistentVolumeClaimError(cluster, + errors.WithStack(r.apply(ctx, pvc))) + } + + if err != nil { + return nil, err + } + + tablespaceVolumes = append(tablespaceVolumes, pvc) + } + + return +} + +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={get} +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,delete,patch} // reconcilePostgresWALVolume writes the PersistentVolumeClaim for instance's // PostgreSQL WAL volume. @@ -517,9 +807,10 @@ func (r *Reconciler) reconcilePostgresWALVolume( naming.LabelInstanceSet: instanceSpec.Name, naming.LabelInstance: instance.Name, naming.LabelRole: naming.RolePostgresWAL, + naming.LabelData: naming.DataPostgres, } - var pvc *v1.PersistentVolumeClaim + var pvc *corev1.PersistentVolumeClaim existingPVCName, err := getPGPVCName(labelMap, clusterVolumes) if err != nil { return nil, errors.WithStack(err) @@ -565,7 +856,7 @@ func (r *Reconciler) reconcilePostgresWALVolume( // This assumes that $PGDATA matches the configured PostgreSQL "data_directory". 
var stdout bytes.Buffer err = errors.WithStack(r.PodExec( - observed.Pods[0].Namespace, observed.Pods[0].Name, naming.ContainerDatabase, + ctx, observed.Pods[0].Namespace, observed.Pods[0].Name, naming.ContainerDatabase, nil, &stdout, nil, "bash", "-ceu", "--", `exec realpath "${PGDATA}/pg_wal"`)) walDirectory = strings.TrimRight(stdout.String(), "\n") @@ -602,3 +893,103 @@ func (r *Reconciler) reconcilePostgresWALVolume( return pvc, err } + +// reconcileDatabaseInitSQL runs custom SQL files in the database. When +// DatabaseInitSQL is defined, the function will find the primary pod and run +// SQL from the defined ConfigMap +func (r *Reconciler) reconcileDatabaseInitSQL(ctx context.Context, + cluster *v1beta1.PostgresCluster, instances *observedInstances) error { + log := logging.FromContext(ctx) + + // Spec is not defined, unset status and return + if cluster.Spec.DatabaseInitSQL == nil { + // If database init sql is not requested, we will always expect the + // status to be nil + cluster.Status.DatabaseInitSQL = nil + return nil + } + + // Spec is defined but status is already set, return + if cluster.Status.DatabaseInitSQL != nil { + return nil + } + + // Based on the previous checks, the user wants to run sql in the database. + // Check the provided ConfigMap name and key to ensure the a string + // exists in the ConfigMap data + var ( + err error + data string + ) + + getDataFromConfigMap := func() (string, error) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Spec.DatabaseInitSQL.Name, + Namespace: cluster.Namespace, + }, + } + err := r.Client.Get(ctx, client.ObjectKeyFromObject(cm), cm) + if err != nil { + return "", err + } + + key := cluster.Spec.DatabaseInitSQL.Key + if _, ok := cm.Data[key]; !ok { + err := errors.Errorf("ConfigMap did not contain expected key: %s", key) + return "", err + } + + return cm.Data[key], nil + } + + if data, err = getDataFromConfigMap(); err != nil { + log.Error(err, "Could not get data from ConfigMap", + "ConfigMap", cluster.Spec.DatabaseInitSQL.Name, + "Key", cluster.Spec.DatabaseInitSQL.Key) + return err + } + + // Now that we have the data provided by the user. We can check for a + // writable pod and get the podExecutor for the pod's database container + var podExecutor postgres.Executor + pod, _ := instances.writablePod(naming.ContainerDatabase) + if pod == nil { + log.V(1).Info("Could not find a pod with a writable database container.") + return nil + } + + podExecutor = func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return r.PodExec(ctx, pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + } + + // A writable pod executor has been found and we have the sql provided by + // the user. 
Setup a write function to execute the sql using the podExecutor + write := func(ctx context.Context, exec postgres.Executor) error { + stdout, stderr, err := exec.Exec(ctx, strings.NewReader(data), map[string]string{}) + log.V(1).Info("applied init SQL", "stdout", stdout, "stderr", stderr) + return err + } + + // Update the logger to include fields from the user provided ResourceRef + log = log.WithValues( + "name", cluster.Spec.DatabaseInitSQL.Name, + "key", cluster.Spec.DatabaseInitSQL.Key, + ) + + // Write SQL to database using the podExecutor + err = errors.WithStack(write(logging.NewContext(ctx, log), podExecutor)) + + // If the podExec returns with exit code 0 the write is considered a + // success, keep track of the ConfigMap using a status. This helps to + // ensure SQL doesn't get run again. SQL can be run again if the + // status is lost and the DatabaseInitSQL field exists in the spec. + if err == nil { + status := cluster.Spec.DatabaseInitSQL.Name + cluster.Status.DatabaseInitSQL = &status + } + + return err +} diff --git a/internal/controller/postgrescluster/postgres_test.go b/internal/controller/postgrescluster/postgres_test.go index 78a92805a9..0780b0f577 100644 --- a/internal/controller/postgrescluster/postgres_test.go +++ b/internal/controller/postgrescluster/postgres_test.go @@ -1,46 +1,43 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
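reconcileDatabaseInitSQL above is deliberately run-once: it executes the referenced SQL only while Status.DatabaseInitSQL is unset, then records the ConfigMap name in that status. On the spec side, wiring a cluster to a ConfigMap key looks roughly like the following; the helper, ConfigMap name, and key are illustrative, and the v1beta1 import is the one already used in this package:

// enableInitSQL points a PostgresCluster at a ConfigMap entry containing SQL that
// the reconciler will run once against a writable primary, then remember via status.
func enableInitSQL(cluster *v1beta1.PostgresCluster, configMapName, key string) {
	cluster.Spec.DatabaseInitSQL = &v1beta1.DatabaseInitSQL{
		Name: configMapName, // ConfigMap in the cluster's namespace
		Key:  key,           // entry in ConfigMap.Data holding the SQL text
	}
}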
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" - "errors" + "fmt" "io" "testing" + "github.com/go-logr/logr/funcr" "github.com/google/go-cmp/cmp/cmpopts" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" + "github.com/pkg/errors" "gotest.tools/v3/assert" - "gotest.tools/v3/assert/cmp" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/events" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestGeneratePostgresUserSecret(t *testing.T) { - tEnv, tClient, _ := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) reconciler := &Reconciler{Client: tClient} @@ -96,6 +93,47 @@ func TestGeneratePostgresUserSecret(t *testing.T) { assert.Assert(t, len(secret.Data["verifier"]) > 90, "got %v", len(secret.Data["verifier"])) } + t.Run("Policy", func(t *testing.T) { + spec := spec.DeepCopy() + + // ASCII when unspecified. + spec.Password = nil + secret, err = reconciler.generatePostgresUserSecret(cluster, spec, new(corev1.Secret)) + assert.NilError(t, err) + + if assert.Check(t, secret != nil) { + // This assertion is lacking, but distinguishing between "alphanumeric" + // and "alphanumeric+symbols" is hard. If our generator changes to + // guarantee at least one symbol, we can check for symbols here. + assert.Assert(t, len(secret.Data["password"]) != 0) + } + + // AlphaNumeric when specified. + spec.Password = &v1beta1.PostgresPasswordSpec{ + Type: v1beta1.PostgresPasswordTypeAlphaNumeric, + } + + secret, err = reconciler.generatePostgresUserSecret(cluster, spec, new(corev1.Secret)) + assert.NilError(t, err) + + if assert.Check(t, secret != nil) { + assert.Assert(t, cmp.Regexp(`^[A-Za-z0-9]+$`, string(secret.Data["password"]))) + } + }) + + // Verifier is generated when existing Secret contains only a password. + secret, err = reconciler.generatePostgresUserSecret(cluster, spec, &corev1.Secret{ + Data: map[string][]byte{ + "password": []byte(`asdf`), + }, + }) + assert.NilError(t, err) + + if assert.Check(t, secret != nil) { + assert.Equal(t, string(secret.Data["password"]), "asdf") + assert.Assert(t, len(secret.Data["verifier"]) > 90, "got %v", len(secret.Data["verifier"])) + } + // Copied when existing Secret is full. secret, err = reconciler.generatePostgresUserSecret(cluster, spec, &corev1.Secret{ Data: map[string][]byte{ @@ -121,6 +159,7 @@ func TestGeneratePostgresUserSecret(t *testing.T) { if assert.Check(t, secret != nil) { assert.Assert(t, secret.Data["dbname"] == nil) assert.Assert(t, secret.Data["uri"] == nil) + assert.Assert(t, secret.Data["jdbc-uri"] == nil) } // Present when specified. 
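The new jdbc-uri and pgbouncer-jdbc-uri assertions in the hunks below expect query parameters in a fixed alphabetical order (password, prepareThreshold, user). That is the ordering Go's net/url produces, since url.Values.Encode sorts by key. A small standalone illustration, not necessarily how the operator builds the string; the host, port, database, and credentials are made up:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Encode sorts keys, so the rendered query string is deterministic and matches
	// the regular expressions asserted in the tests below.
	query := url.Values{}
	query.Set("user", "some-user-name")
	query.Set("password", "secret")
	query.Set("prepareThreshold", "0")

	fmt.Println("jdbc:postgresql://hippo2-pgbouncer.ns1.svc:10220/yes?" + query.Encode())
	// jdbc:postgresql://hippo2-pgbouncer.ns1.svc:10220/yes?password=secret&prepareThreshold=0&user=some-user-name
}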
@@ -131,8 +170,13 @@ func TestGeneratePostgresUserSecret(t *testing.T) { if assert.Check(t, secret != nil) { assert.Equal(t, string(secret.Data["dbname"]), "db1") - assert.Assert(t, cmp.Regexp(`postgresql://some-user-name:[^@]+@hippo2-primary.ns1.svc:9999/db1`, + assert.Assert(t, cmp.Regexp( + `^postgresql://some-user-name:[^@]+@hippo2-primary.ns1.svc:9999/db1$`, string(secret.Data["uri"]))) + assert.Assert(t, cmp.Regexp( + `^jdbc:postgresql://hippo2-primary.ns1.svc:9999/db1`+ + `[?]password=[^&]+&user=some-user-name$`, + string(secret.Data["jdbc-uri"]))) } // Only the first in the list. @@ -143,8 +187,13 @@ func TestGeneratePostgresUserSecret(t *testing.T) { if assert.Check(t, secret != nil) { assert.Equal(t, string(secret.Data["dbname"]), "first") - assert.Assert(t, cmp.Regexp(`postgresql://some-user-name:[^@]+@hippo2-primary.ns1.svc:9999/first`, + assert.Assert(t, cmp.Regexp( + `^postgresql://some-user-name:[^@]+@hippo2-primary.ns1.svc:9999/first$`, string(secret.Data["uri"]))) + assert.Assert(t, cmp.Regexp( + `^jdbc:postgresql://hippo2-primary.ns1.svc:9999/first[?].+$`, + string(secret.Data["jdbc-uri"]))) + } }) @@ -160,6 +209,7 @@ func TestGeneratePostgresUserSecret(t *testing.T) { assert.Equal(t, string(secret.Data["pgbouncer-host"]), "hippo2-pgbouncer.ns1.svc") assert.Equal(t, string(secret.Data["pgbouncer-port"]), "10220") assert.Assert(t, secret.Data["pgbouncer-uri"] == nil) + assert.Assert(t, secret.Data["pgbouncer-jdbc-uri"] == nil) } // Includes a URI when possible. @@ -170,47 +220,137 @@ func TestGeneratePostgresUserSecret(t *testing.T) { assert.NilError(t, err) if assert.Check(t, secret != nil) { - assert.Assert(t, cmp.Regexp(`postgresql://some-user-name:[^@]+@hippo2-pgbouncer.ns1.svc:10220/yes`, + assert.Assert(t, cmp.Regexp( + `^postgresql://some-user-name:[^@]+@hippo2-pgbouncer.ns1.svc:10220/yes$`, string(secret.Data["pgbouncer-uri"]))) + assert.Assert(t, cmp.Regexp( + `^jdbc:postgresql://hippo2-pgbouncer.ns1.svc:10220/yes`+ + `[?]password=[^&]+&prepareThreshold=0&user=some-user-name$`, + string(secret.Data["pgbouncer-jdbc-uri"]))) } }) } func TestReconcilePostgresVolumes(t *testing.T) { ctx := context.Background() - tEnv, tClient, _ := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 1) reconciler := &Reconciler{ Client: tClient, Owner: client.FieldOwner(t.Name()), } - ns := &corev1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - assert.NilError(t, tClient.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, ns)) }) + t.Run("DataVolumeNoSourceCluster", func(t *testing.T) { + cluster := testCluster() + ns := setupNamespace(t, tClient) + cluster.Namespace = ns.Name - cluster := testCluster() - cluster.Namespace = ns.Name + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) - assert.NilError(t, tClient.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + spec := &v1beta1.PostgresInstanceSetSpec{} + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: "some-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Gi } }, + storageClassName: "storage-class-for-data", + }, + }`), spec)) + instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} - spec := &v1beta1.PostgresInstanceSetSpec{} - assert.NilError(t, yaml.Unmarshal([]byte(`{ - name: "some-instance", - 
dataVolumeClaimSpec: { - accessModes: [ReadWriteOnce], - resources: { requests: { storage: 1Gi } }, - storageClassName: "storage-class-for-data", - }, - }`), spec)) + pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil, nil) + assert.NilError(t, err) + + assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) + + assert.Equal(t, pvc.Labels[naming.LabelCluster], cluster.Name) + assert.Equal(t, pvc.Labels[naming.LabelInstance], instance.Name) + assert.Equal(t, pvc.Labels[naming.LabelInstanceSet], spec.Name) + assert.Equal(t, pvc.Labels[naming.LabelRole], "pgdata") + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + requests: + storage: 1Gi +storageClassName: storage-class-for-data +volumeMode: Filesystem + `)) + }) - instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + t.Run("DataVolumeSourceClusterWithGoodSnapshot", func(t *testing.T) { + cluster := testCluster() + ns := setupNamespace(t, tClient) + cluster.Namespace = ns.Name - t.Run("DataVolume", func(t *testing.T) { - pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil) + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + + spec := &v1beta1.PostgresInstanceSetSpec{} + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: "some-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Gi } }, + storageClassName: "storage-class-for-data", + }, + }`), spec)) + instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler.Recorder = recorder + + // Turn on VolumeSnapshots feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.VolumeSnapshots: true, + })) + ctx := feature.NewContext(ctx, gate) + + // Create source cluster and enable snapshots + sourceCluster := testCluster() + sourceCluster.Namespace = ns.Name + sourceCluster.Name = "rhino" + sourceCluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "some-class-name", + } + + // Create a snapshot + snapshot := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "rhino", + }, + }, + } + snapshot.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") + snapshot.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") + err := reconciler.apply(ctx, snapshot) + assert.NilError(t, err) + + // Get snapshot and update Status.ReadyToUse and CreationTime + err = reconciler.Client.Get(ctx, client.ObjectKeyFromObject(snapshot), snapshot) + assert.NilError(t, err) + + currentTime := metav1.Now() + snapshot.Status = &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(true), + CreationTime: ¤tTime, + } + err = reconciler.Client.Status().Update(ctx, snapshot) + assert.NilError(t, err) + + // Reconcile volume + pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil, sourceCluster) assert.NilError(t, err) assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) @@ -220,18 +360,111 @@ func TestReconcilePostgresVolumes(t *testing.T) { assert.Equal(t, 
pvc.Labels[naming.LabelInstanceSet], spec.Name) assert.Equal(t, pvc.Labels[naming.LabelRole], "pgdata") - assert.Assert(t, marshalMatches(pvc.Spec, ` + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` accessModes: - ReadWriteOnce +dataSource: + apiGroup: snapshot.storage.k8s.io + kind: VolumeSnapshot + name: some-snapshot +dataSourceRef: + apiGroup: snapshot.storage.k8s.io + kind: VolumeSnapshot + name: some-snapshot resources: requests: storage: 1Gi storageClassName: storage-class-for-data volumeMode: Filesystem `)) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "BootstrappingWithSnapshot") + assert.Equal(t, recorder.Events[0].Note, "Snapshot found for rhino; bootstrapping cluster with snapshot.") + }) + + t.Run("DataVolumeSourceClusterSnapshotsEnabledNoSnapshots", func(t *testing.T) { + cluster := testCluster() + ns := setupNamespace(t, tClient) + cluster.Namespace = ns.Name + + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + + spec := &v1beta1.PostgresInstanceSetSpec{} + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: "some-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Gi } }, + storageClassName: "storage-class-for-data", + }, + }`), spec)) + instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler.Recorder = recorder + + // Turn on VolumeSnapshots feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.VolumeSnapshots: true, + })) + ctx := feature.NewContext(ctx, gate) + + // Create source cluster and enable snapshots + sourceCluster := testCluster() + sourceCluster.Namespace = ns.Name + sourceCluster.Name = "rhino" + sourceCluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "some-class-name", + } + + // Reconcile volume + pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil, sourceCluster) + assert.NilError(t, err) + + assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) + + assert.Equal(t, pvc.Labels[naming.LabelCluster], cluster.Name) + assert.Equal(t, pvc.Labels[naming.LabelInstance], instance.Name) + assert.Equal(t, pvc.Labels[naming.LabelInstanceSet], spec.Name) + assert.Equal(t, pvc.Labels[naming.LabelRole], "pgdata") + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + requests: + storage: 1Gi +storageClassName: storage-class-for-data +volumeMode: Filesystem + `)) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "SnapshotNotFound") + assert.Equal(t, recorder.Events[0].Note, "No ReadyToUse snapshots were found for rhino; proceeding with typical restore process.") }) t.Run("WALVolume", func(t *testing.T) { + cluster := testCluster() + ns := setupNamespace(t, tClient) + cluster.Namespace = ns.Name + + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + + spec := &v1beta1.PostgresInstanceSetSpec{} + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: "some-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Gi } }, + storageClassName: "storage-class-for-data", + }, + 
}`), spec)) + instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + observed := &Instance{} t.Run("None", func(t *testing.T) { @@ -260,7 +493,7 @@ volumeMode: Filesystem assert.Equal(t, pvc.Labels[naming.LabelInstanceSet], spec.Name) assert.Equal(t, pvc.Labels[naming.LabelRole], "pgwal") - assert.Assert(t, marshalMatches(pvc.Spec, ` + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` accessModes: - ReadWriteMany resources: @@ -299,7 +532,7 @@ volumeMode: Filesystem expected := errors.New("flop") reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, _ io.Reader, _, _ io.Writer, command ...string, ) error { assert.Equal(t, namespace, "pod-ns") @@ -316,7 +549,7 @@ volumeMode: Filesystem // Files are in the wrong place; expect no changes to the PVC. reconciler.PodExec = func( - _, _, _ string, _ io.Reader, stdout, _ io.Writer, _ ...string, + ctx context.Context, _, _, _ string, _ io.Reader, stdout, _ io.Writer, _ ...string, ) error { assert.Assert(t, stdout != nil) _, err := stdout.Write([]byte("some-place\n")) @@ -339,7 +572,7 @@ volumeMode: Filesystem new(corev1.ContainerStateRunning) reconciler.PodExec = func( - _, _, _ string, _ io.Reader, stdout, _ io.Writer, _ ...string, + ctx context.Context, _, _, _ string, _ io.Reader, stdout, _ io.Writer, _ ...string, ) error { assert.Assert(t, stdout != nil) _, err := stdout.Write([]byte(postgres.WALDirectory(cluster, spec) + "\n")) @@ -352,8 +585,11 @@ volumeMode: Filesystem assert.Assert(t, returned == nil) key, fetched := client.ObjectKeyFromObject(pvc), &corev1.PersistentVolumeClaim{} - assert.NilError(t, tClient.Get(ctx, key, fetched)) - assert.Assert(t, fetched.DeletionTimestamp != nil, "expected deleted") + if err := tClient.Get(ctx, key, fetched); err == nil { + assert.Assert(t, fetched.DeletionTimestamp != nil, "expected deleted") + } else { + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %v", err) + } // Pods will redeploy while the PVC is scheduled for deletion. 
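Several tests in this file turn a feature gate on for a single context only: VolumeSnapshots in the data volume tests above and AutoGrowVolumes in TestSetVolumeSize below. Shown in isolation, and assuming a *testing.T named t plus the context and feature imports already used in this file, the pattern is roughly:

gate := feature.NewGate()
if err := gate.SetFromMap(map[string]bool{
	feature.AutoGrowVolumes: true, // any gate in the feature package is enabled the same way
}); err != nil {
	t.Fatal(err)
}
ctx := feature.NewContext(context.Background(), gate)

// The code under test reads the gate back through the context,
// e.g. feature.Enabled(ctx, feature.AutoGrowVolumes).
_ = ctx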
observed.Pods = nil @@ -365,3 +601,633 @@ volumeMode: Filesystem }) }) } + +func TestSetVolumeSize(t *testing.T) { + t.Parallel() + + ctx := context.Background() + cluster := v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "elephant", + Namespace: "test-namespace", + }, + Spec: v1beta1.PostgresClusterSpec{ + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + Name: "some-instance", + Replicas: initialize.Int32(1), + }}, + }, + } + + instance := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "elephant-some-instance-wxyz-0", + Namespace: cluster.Namespace, + }} + + setupLogCapture := func(ctx context.Context) (context.Context, *[]string) { + calls := []string{} + testlog := funcr.NewJSON(func(object string) { + calls = append(calls, object) + }, funcr.Options{ + Verbosity: 1, + }) + return logging.NewContext(ctx, testlog), &calls + } + + // helper functions + instanceSetSpec := func(request, limit string) *v1beta1.PostgresInstanceSetSpec { + return &v1beta1.PostgresInstanceSetSpec{ + Name: "some-instance", + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse(request), + }, + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse(limit), + }}}} + } + + desiredStatus := func(request string) v1beta1.PostgresClusterStatus { + desiredMap := make(map[string]string) + desiredMap["elephant-some-instance-wxyz-0"] = request + return v1beta1.PostgresClusterStatus{ + InstanceSets: []v1beta1.PostgresInstanceSetStatus{{ + Name: "some-instance", + DesiredPGDataVolume: desiredMap, + }}} + } + + t.Run("RequestAboveLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("4Gi", "3Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 3Gi + requests: + storage: 3Gi +`)) + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "VolumeRequestOverLimit") + assert.Equal(t, recorder.Events[0].Note, "pgData volume request (4Gi) for elephant/some-instance is greater than set limit (3Gi). 
Limit value will be used.") + }) + + t.Run("NoFeatureGate", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "3Gi") + + desiredMap := make(map[string]string) + desiredMap["elephant-some-instance-wxyz-0"] = "2Gi" + cluster.Status = v1beta1.PostgresClusterStatus{ + InstanceSets: []v1beta1.PostgresInstanceSetStatus{{ + Name: "some-instance", + DesiredPGDataVolume: desiredMap, + }}, + } + + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 3Gi + requests: + storage: 1Gi + `)) + + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 0) + + // clear status for other tests + cluster.Status = v1beta1.PostgresClusterStatus{} + }) + + t.Run("FeatureEnabled", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.AutoGrowVolumes: true, + })) + ctx := feature.NewContext(ctx, gate) + + t.Run("StatusNoLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := &v1beta1.PostgresInstanceSetSpec{ + Name: "some-instance", + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }}}} + cluster.Status = desiredStatus("2Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + requests: + storage: 1Gi +`)) + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 0) + + // clear status for other tests + cluster.Status = v1beta1.PostgresClusterStatus{} + }) + + t.Run("LimitNoStatus", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "2Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 2Gi + requests: + storage: 1Gi +`)) + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 0) + }) + + t.Run("BadStatusWithLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "3Gi") + cluster.Status = desiredStatus("NotAValidValue") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce 
+resources: + limits: + storage: 3Gi + requests: + storage: 1Gi +`)) + + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 1) + assert.Assert(t, cmp.Contains((*logs)[0], "Unable to parse volume request: NotAValidValue")) + }) + + t.Run("StatusWithLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "3Gi") + cluster.Status = desiredStatus("2Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 3Gi + requests: + storage: 2Gi +`)) + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 0) + }) + + t.Run("StatusWithLimitGrowToLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "2Gi") + cluster.Status = desiredStatus("2Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 2Gi + requests: + storage: 2Gi +`)) + + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "VolumeLimitReached") + assert.Equal(t, recorder.Events[0].Note, "pgData volume(s) for elephant/some-instance are at size limit (2Gi).") + }) + + t.Run("DesiredStatusOverLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("4Gi", "5Gi") + cluster.Status = desiredStatus("10Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 5Gi + requests: + storage: 5Gi +`)) + + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 2) + var found1, found2 bool + for _, event := range recorder.Events { + if event.Reason == "VolumeLimitReached" { + found1 = true + assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Note, "pgData volume(s) for elephant/some-instance are at size limit (5Gi).") + } + if event.Reason == "DesiredVolumeAboveLimit" { + found2 = true + assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Note, + "The desired size (10Gi) for the elephant/some-instance pgData volume(s) is greater than the size limit (5Gi).") + } + } + assert.Assert(t, found1 && found2) + }) + + }) +} + +func TestReconcileDatabaseInitSQL(t *testing.T) { + ctx := context.Background() + var called bool + + // Test Environment Setup + _, client := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + r := &Reconciler{ + Client: client, + + // Overwrite the PodExec function with a check to ensure the exec + // call would have been made + 
PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { + called = true + return nil + }, + } + + // Test Resources Setup + ns := setupNamespace(t, client) + + // Define a status to be set if sql has already been run + status := "set" + + // reconcileDatabaseInitSQL expects to find a pod that is running with a + // writable database container. Define this pod in an observed instance so + // we can simulate a podExec call into the database + instances := []*Instance{ + { + Name: "instance", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: "pod", + Annotations: map[string]string{ + "status": `{"role":"master"}`, + }, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{{ + Name: naming.ContainerDatabase, + State: corev1.ContainerState{ + Running: new(corev1.ContainerStateRunning), + }, + }}, + }, + }}, + Runner: &appsv1.StatefulSet{}, + }, + } + observed := &observedInstances{forCluster: instances} + + // Create a ConfigMap containing SQL to be defined in the spec + path := "test-path" + cm := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: ns.Name, + }, + Data: map[string]string{ + path: "stuff", + }, + } + assert.NilError(t, client.Create(ctx, cm.DeepCopy())) + + // Define a fully configured cluster that would lead to SQL being run in + // the database. This test cluster will be modified as needed for testing + testCluster := testCluster() + testCluster.Namespace = ns.Name + testCluster.Spec.DatabaseInitSQL = &v1beta1.DatabaseInitSQL{ + Name: cm.Name, + Key: path, + } + + // Start Tests + t.Run("not defined", func(t *testing.T) { + // Custom SQL is not defined in the spec and status is unset + cluster := testCluster.DeepCopy() + cluster.Spec.DatabaseInitSQL = nil + + assert.NilError(t, r.reconcileDatabaseInitSQL(ctx, cluster, observed)) + assert.Assert(t, !called, "PodExec should not have been called") + assert.Assert(t, cluster.Status.DatabaseInitSQL == nil, "Status should not be set") + }) + t.Run("not defined with status", func(t *testing.T) { + // Custom SQL is not defined in the spec and status is set + cluster := testCluster.DeepCopy() + cluster.Spec.DatabaseInitSQL = nil + cluster.Status.DatabaseInitSQL = &status + + assert.NilError(t, r.reconcileDatabaseInitSQL(ctx, cluster, observed)) + assert.Assert(t, !called, "PodExec should not have been called") + assert.Assert(t, cluster.Status.DatabaseInitSQL == nil, "Status was set and should have been removed") + }) + t.Run("status set", func(t *testing.T) { + // Custom SQL is defined and status is set + cluster := testCluster.DeepCopy() + cluster.Status.DatabaseInitSQL = &status + + assert.NilError(t, r.reconcileDatabaseInitSQL(ctx, cluster, observed)) + assert.Assert(t, !called, "PodExec should not have been called") + assert.Equal(t, cluster.Status.DatabaseInitSQL, &status, "Status should not have changed") + }) + t.Run("No writable pod", func(t *testing.T) { + cluster := testCluster.DeepCopy() + + assert.NilError(t, r.reconcileDatabaseInitSQL(ctx, cluster, nil)) + assert.Assert(t, !called, "PodExec should not have been called") + assert.Assert(t, cluster.Status.DatabaseInitSQL == nil, "SQL couldn't be executed so status should be unset") + }) + t.Run("Fully Configured", func(t *testing.T) { + cluster := testCluster.DeepCopy() + + assert.NilError(t, r.reconcileDatabaseInitSQL(ctx, cluster, observed)) + assert.Assert(t, called, "PodExec should be called") 
+ assert.Equal(t, + *cluster.Status.DatabaseInitSQL, + cluster.Spec.DatabaseInitSQL.Name, + "Status should be set to the custom configmap name") + }) +} + +func TestReconcileDatabaseInitSQLConfigMap(t *testing.T) { + ctx := context.Background() + var called bool + + // Test Environment Setup + _, client := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + r := &Reconciler{ + Client: client, + + // Overwrite the PodExec function with a check to ensure the exec + // call would have been made + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { + called = true + return nil + }, + } + + // Test Resources Setup + ns := setupNamespace(t, client) + + // reconcileDatabaseInitSQL expects to find a pod that is running with a writable + // database container. Define this pod in an observed instance so that + // we can simulate a podExec call into the database + instances := []*Instance{ + { + Name: "instance", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: "pod", + Annotations: map[string]string{ + "status": `{"role":"master"}`, + }, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{{ + Name: naming.ContainerDatabase, + State: corev1.ContainerState{ + Running: new(corev1.ContainerStateRunning), + }, + }}, + }, + }}, + Runner: &appsv1.StatefulSet{}, + }, + } + observed := &observedInstances{forCluster: instances} + + // Define fully configured cluster that would lead to sql being run in the + // database. This cluster will be modified for testing + testCluster := testCluster() + testCluster.Namespace = ns.Name + testCluster.Spec.DatabaseInitSQL = new(v1beta1.DatabaseInitSQL) + + t.Run("not found", func(t *testing.T) { + cluster := testCluster.DeepCopy() + cluster.Spec.DatabaseInitSQL = &v1beta1.DatabaseInitSQL{ + Name: "not-found", + } + + err := r.reconcileDatabaseInitSQL(ctx, cluster, observed) + assert.Assert(t, apierrors.IsNotFound(err), err) + assert.Assert(t, !called) + }) + + t.Run("found no data", func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "found-no-data", + Namespace: ns.Name, + }, + } + assert.NilError(t, client.Create(ctx, cm)) + + cluster := testCluster.DeepCopy() + cluster.Spec.DatabaseInitSQL = &v1beta1.DatabaseInitSQL{ + Name: cm.Name, + Key: "bad-path", + } + + err := r.reconcileDatabaseInitSQL(ctx, cluster, observed) + assert.Equal(t, err.Error(), "ConfigMap did not contain expected key: bad-path") + assert.Assert(t, !called) + }) + + t.Run("found with data", func(t *testing.T) { + path := "test-path" + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "found-with-data", + Namespace: ns.Name, + }, + Data: map[string]string{ + path: "string", + }, + } + assert.NilError(t, client.Create(ctx, cm)) + + cluster := testCluster.DeepCopy() + cluster.Spec.DatabaseInitSQL = &v1beta1.DatabaseInitSQL{ + Name: cm.Name, + Key: path, + } + + assert.NilError(t, r.reconcileDatabaseInitSQL(ctx, cluster, observed)) + assert.Assert(t, called) + }) +} + +func TestValidatePostgresUsers(t *testing.T) { + t.Parallel() + + t.Run("Empty", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + + cluster.Spec.Users = nil + reconciler.validatePostgresUsers(cluster) + assert.Equal(t, len(recorder.Events), 0) + + cluster.Spec.Users = []v1beta1.PostgresUserSpec{} + 
reconciler.validatePostgresUsers(cluster) + assert.Equal(t, len(recorder.Events), 0) + }) + + // See [internal/testing/validation.TestPostgresUserOptions] + + t.Run("NoComments", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + cluster.Name = "pg1" + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "dashes", Options: "ANY -- comment"}, + {Name: "block-open", Options: "/* asdf"}, + {Name: "block-close", Options: " qw */ rt"}, + } + + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + + reconciler.validatePostgresUsers(cluster) + assert.Equal(t, len(recorder.Events), 3) + + for i, event := range recorder.Events { + assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Reason, "InvalidUser") + assert.Assert(t, cmp.Contains(event.Note, "cannot contain comments")) + assert.Assert(t, cmp.Contains(event.Note, + fmt.Sprintf("spec.users[%d].options", i))) + } + }) + + t.Run("NoPassword", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + cluster.Name = "pg5" + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "uppercase", Options: "SUPERUSER PASSWORD ''"}, + {Name: "lowercase", Options: "password 'asdf'"}, + } + + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + + reconciler.validatePostgresUsers(cluster) + assert.Equal(t, len(recorder.Events), 2) + + for i, event := range recorder.Events { + assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Reason, "InvalidUser") + assert.Assert(t, cmp.Contains(event.Note, "cannot assign password")) + assert.Assert(t, cmp.Contains(event.Note, + fmt.Sprintf("spec.users[%d].options", i))) + } + }) + + t.Run("Valid", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "normal", Options: "CREATEDB valid until '2006-01-02'"}, + {Name: "very-full", Options: "NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 5"}, + } + + reconciler := &Reconciler{} + assert.Assert(t, reconciler.Recorder == nil, + "expected the following to not use a Recorder at all") + + reconciler.validatePostgresUsers(cluster) + }) +} diff --git a/internal/controller/postgrescluster/rbac.go b/internal/controller/postgrescluster/rbac.go index 2b15f8d1cc..38dd808c44 100644 --- a/internal/controller/postgrescluster/rbac.go +++ b/internal/controller/postgrescluster/rbac.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
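The NoComments and NoPassword subtests above pin down what validatePostgresUsers must reject in spec.users[*].options. A rough approximation of those checks, using only the standard strings package; this is a sketch of the expected behavior, not the operator's actual implementation:

// optionsProblem returns a short reason when the role options would be rejected,
// mirroring the event notes asserted in the tests above, or "" when they look valid.
func optionsProblem(options string) string {
	switch {
	case strings.Contains(options, "--"),
		strings.Contains(options, "/*"),
		strings.Contains(options, "*/"):
		return "cannot contain comments"
	case strings.Contains(strings.ToLower(options), "password"):
		return "cannot assign password"
	}
	return ""
}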
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -39,9 +28,9 @@ func (r *Reconciler) reconcileRBACResources( return r.reconcileInstanceRBAC(ctx, cluster) } -// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=create;patch -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=create;patch -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;patch +// +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs={create,patch} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="roles",verbs={create,patch} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={create,patch} // reconcileInstanceRBAC writes the Role, RoleBinding, and ServiceAccount for // all instances of cluster. diff --git a/internal/controller/postgrescluster/scale_test.go b/internal/controller/postgrescluster/scale_test.go deleted file mode 100644 index 707009c4ba..0000000000 --- a/internal/controller/postgrescluster/scale_test.go +++ /dev/null @@ -1,356 +0,0 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package postgrescluster - -import ( - "os" - "strings" - "testing" - "time" - - "go.opentelemetry.io/otel" - "gotest.tools/v3/assert" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -func Int32(v int32) *int32 { return &v } - -func TestScaleDown(t *testing.T) { - if !strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { - t.Skip("requires a running garbage collection controller") - } - // TODO: Update tests that include envtest package to better handle - // running in parallel - // t.Parallel() - env, cc, config := setupTestEnv(t, ControllerName) - t.Cleanup(func() { teardownTestEnv(t, env) }) - - reconciler := &Reconciler{} - ctx, cancel := setupManager(t, config, func(mgr manager.Manager) { - reconciler = &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: new(record.FakeRecorder), - Tracer: otel.Tracer(t.Name()), - } - podExec, err := newPodExecutor(config) - assert.NilError(t, err) - reconciler.PodExec = podExec - }) - t.Cleanup(func() { teardownManager(cancel, t) }) - - ns := &v1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - ns.Labels = labels.Set{"postgres-operator-test": t.Name()} - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, ns)) }) - - mustReconcile := func(t *testing.T, cluster 
*v1beta1.PostgresCluster) reconcile.Result { - t.Helper() - key := client.ObjectKeyFromObject(cluster) - request := reconcile.Request{NamespacedName: key} - result, err := reconciler.Reconcile(ctx, request) - assert.NilError(t, err, "%+v", err) - return result - } - - // Defines a volume claim spec that can be used to create instances - volumeClaimSpec := v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, - Resources: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceStorage: resource.MustParse("1Gi"), - }, - }, - } - - // Defines a base cluster spec that can be used by tests to generate a - // cluster with an expected number of instances - baseCluster := v1beta1.PostgresCluster{ - Spec: v1beta1.PostgresClusterSpec{ - PostgresVersion: 13, - Image: CrunchyPostgresHAImage, - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{ - Image: CrunchyPGBackRestImage, - Repos: []v1beta1.PGBackRestRepo{{ - Name: "repo1", - Volume: &v1beta1.RepoPVC{VolumeClaimSpec: volumeClaimSpec}, - }}, - }, - }, - }, - } - - for _, test := range []struct { - name string - createSet []v1beta1.PostgresInstanceSetSpec - createRunningInstances int32 - updateSet []v1beta1.PostgresInstanceSetSpec - updateRunningInstances int32 - primaryTest func(*testing.T, string, string) - }{ - { - name: "OneSet", - // Remove a single instance set from the spec - createSet: []v1beta1.PostgresInstanceSetSpec{{ - Name: "daisy", - Replicas: Int32(1), - DataVolumeClaimSpec: volumeClaimSpec, - }, { - Name: "max", - Replicas: Int32(1), - DataVolumeClaimSpec: volumeClaimSpec, - }}, - createRunningInstances: 2, - updateSet: []v1beta1.PostgresInstanceSetSpec{{ - Name: "daisy", - Replicas: Int32(1), - DataVolumeClaimSpec: volumeClaimSpec, - }}, - updateRunningInstances: 1, - }, { - name: "InstancesWithOneSet", - // Decrease the number of replicas that are defined for one instance set - createSet: []v1beta1.PostgresInstanceSetSpec{{ - Name: "daisy", - Replicas: Int32(2), - DataVolumeClaimSpec: volumeClaimSpec, - }}, - createRunningInstances: 2, - updateSet: []v1beta1.PostgresInstanceSetSpec{{ - Name: "daisy", - Replicas: Int32(1), - DataVolumeClaimSpec: volumeClaimSpec, - }}, - updateRunningInstances: 1, - primaryTest: func(t *testing.T, old, new string) { - assert.Equal(t, old, new, "Primary instance should not have changed") - }, - }, { - name: "InstancesWithTwoSets", - // Decrease the number of replicas that are defined for one instance set - // and ensure that the other instance set is unchanged - createSet: []v1beta1.PostgresInstanceSetSpec{{ - Name: "daisy", - Replicas: Int32(2), - DataVolumeClaimSpec: volumeClaimSpec, - }, { - Name: "max", - Replicas: Int32(1), - DataVolumeClaimSpec: volumeClaimSpec, - }}, - createRunningInstances: 3, - updateSet: []v1beta1.PostgresInstanceSetSpec{{ - Name: "daisy", - Replicas: Int32(1), - DataVolumeClaimSpec: volumeClaimSpec, - }, { - Name: "max", - Replicas: Int32(1), - DataVolumeClaimSpec: volumeClaimSpec, - }}, - updateRunningInstances: 2, - }, - } { - t.Run(test.name, func(t *testing.T) { - var oldPrimaryInstanceName string - var newPrimaryInstanceName string - - cluster := baseCluster.DeepCopy() - cluster.ObjectMeta.Name = strings.ToLower(test.name) - cluster.ObjectMeta.Namespace = ns.Name - cluster.Spec.InstanceSets = test.createSet - - assert.NilError(t, reconciler.Client.Create(ctx, cluster)) - t.Cleanup(func() { - // Remove finalizers, if any, so the namespace can terminate. 
- assert.Check(t, client.IgnoreNotFound( - reconciler.Client.Patch(ctx, cluster, client.RawPatch( - client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) - - // Set Cluster to delete after test - assert.Check(t, reconciler.Client.Delete(ctx, cluster)) - }) - - // Continue until instances are healthy. - var instances []appsv1.StatefulSet - assert.NilError(t, wait.Poll(time.Second, Scale(time.Minute), func() (bool, error) { - mustReconcile(t, cluster) - - list := appsv1.StatefulSetList{} - selector, err := labels.Parse(strings.Join([]string{ - "postgres-operator.crunchydata.com/cluster=" + cluster.Name, - "postgres-operator.crunchydata.com/instance", - }, ",")) - assert.NilError(t, err) - assert.NilError(t, cc.List(ctx, &list, - client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector})) - - instances = list.Items - - ready := int32(0) - for i := range instances { - ready += instances[i].Status.ReadyReplicas - } - return ready == test.createRunningInstances, nil - }), "expected %v instances to be ready, got:\n%+v", test.createRunningInstances, instances) - - if test.primaryTest != nil { - // Grab the old primary name to use later - primaryPod := v1.PodList{} - assert.NilError(t, wait.Poll(time.Second, Scale(15*time.Second), func() (bool, error) { - primarySelector, err := naming.AsSelector(metav1.LabelSelector{ - MatchLabels: map[string]string{ - naming.LabelCluster: cluster.Name, - naming.LabelRole: naming.RolePatroniLeader, - }, - }) - assert.NilError(t, err) - assert.NilError(t, cc.List(ctx, &primaryPod, - client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: primarySelector})) - - if len(primaryPod.Items) == 1 { - oldPrimaryInstanceName = primaryPod.Items[0].Labels[naming.LabelInstance] - return true, nil - } - return false, nil - }), "could not find primary, got:\n%+v", primaryPod.Items) - } - - // The cluster is running with the correct number of Ready Replicas - // Now we can update the cluster by applying changes to the spec - copy := cluster.DeepCopy() - copy.Spec.InstanceSets = test.updateSet - - err := reconciler.Client.Patch(ctx, copy, client.MergeFrom(cluster)) - assert.NilError(t, err, "Error reconciling cluster") - - // Run the reconcile loop until we have the expected number of - // Ready Replicas - assert.NilError(t, wait.Poll(time.Second, Scale(time.Minute), func() (bool, error) { - mustReconcile(t, cluster) - - list := appsv1.StatefulSetList{} - selector, err := labels.Parse(strings.Join([]string{ - "postgres-operator.crunchydata.com/cluster=" + cluster.Name, - "postgres-operator.crunchydata.com/instance", - }, ",")) - assert.NilError(t, err) - assert.NilError(t, cc.List(ctx, &list, - client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector})) - - instances = list.Items - - ready := int32(0) - for i := range instances { - ready += instances[i].Status.ReadyReplicas - } - return ready == test.updateRunningInstances, nil - }), "expected %v instances to be ready, got:\n%+v", test.updateRunningInstances, instances) - - // In the update case we need to ensure that the pods have deleted - var pods []corev1.Pod - assert.NilError(t, wait.Poll(time.Second, Scale(time.Minute/2), func() (bool, error) { - list := v1.PodList{} - selector, err := labels.Parse(strings.Join([]string{ - "postgres-operator.crunchydata.com/cluster=" + cluster.Name, - "postgres-operator.crunchydata.com/instance", - }, ",")) - assert.NilError(t, err) - assert.NilError(t, cc.List(ctx, &list, - 
client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector})) - - pods = list.Items - - return len(pods) == int(test.updateRunningInstances), nil - }), "expected %v pods, got:\n%+v", test.updateRunningInstances, pods) - - if test.primaryTest != nil { - // If this is a primary test grab the updated primary - primaryPod := v1.PodList{} - primarySelector, err := naming.AsSelector(metav1.LabelSelector{ - MatchLabels: map[string]string{ - naming.LabelCluster: cluster.Name, - naming.LabelRole: "master", - }, - }) - assert.NilError(t, err) - assert.NilError(t, cc.List(ctx, &primaryPod, - client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: primarySelector})) - - if len(primaryPod.Items) == 1 { - newPrimaryInstanceName = primaryPod.Items[0].Labels[naming.LabelInstance] - } - - t.Run("Primary Test", func(t *testing.T) { - test.primaryTest(t, oldPrimaryInstanceName, newPrimaryInstanceName) - }) - } - - // The cluster has the correct number of total instances. - // Does each instance set have the correct number of replicas? - var podList corev1.PodList - selector, err := naming.AsSelector(naming.ClusterInstances(cluster.Name)) - assert.NilError(t, err) - assert.NilError(t, reconciler.Client.List(ctx, &podList, - client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector}, - )) - - // Once again we make sure that the number of instances in the - // environment reflect the number we expect - assert.Equal(t, len(podList.Items), int(test.updateRunningInstances)) - - // Group the instances by the instance set label and count the - // replicas for each set - replicas := map[string]int32{} - for _, instance := range podList.Items { - replicas[instance.Labels[naming.LabelInstanceSet]]++ - } - - // Ensure that each set has the number of replicas defined in - // the test - for _, set := range test.updateSet { - assert.Equal(t, replicas[set.Name], *set.Replicas) - delete(replicas, set.Name) - } - - // Finally make sure that we don't have any extra sets - assert.Equal(t, len(replicas), 0) - }) - } -} diff --git a/internal/controller/postgrescluster/snapshots.go b/internal/controller/postgrescluster/snapshots.go new file mode 100644 index 0000000000..76ad195600 --- /dev/null +++ b/internal/controller/postgrescluster/snapshots.go @@ -0,0 +1,617 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package postgrescluster + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" + + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/pgbackrest" + "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +//+kubebuilder:rbac:groups="snapshot.storage.k8s.io",resources="volumesnapshots",verbs={get,list,create,patch,delete} + +// The controller-runtime client sets up a cache that watches anything we "get" or "list". 
+//+kubebuilder:rbac:groups="snapshot.storage.k8s.io",resources="volumesnapshots",verbs={watch} + +// reconcileVolumeSnapshots creates and manages VolumeSnapshots if the proper VolumeSnapshot CRDs +// are installed and VolumeSnapshots are enabled for the PostgresCluster. A VolumeSnapshot of the +// primary instance's pgdata volume will be created whenever a backup is completed. The steps to +// create snapshots include the following sequence: +// 1. We find the latest completed backup job and check the timestamp. +// 2. If the timestamp is later than what's on the dedicated snapshot PVC, a restore job runs in +// the dedicated snapshot volume. +// 3. When the restore job completes, an annotation is updated on the PVC. If the restore job +// fails, we don't run it again. +// 4. When the PVC annotation is updated, we see if there's a volume snapshot with an earlier +// timestamp. +// 5. If there are no snapshots at all, we take a snapshot and put the backup job's completion +// timestamp on the snapshot annotation. +// 6. If an earlier snapshot is found, we take a new snapshot, annotate it and delete the old +// snapshot. +// 7. When the snapshot job completes, we delete the restore job. +func (r *Reconciler) reconcileVolumeSnapshots(ctx context.Context, + postgrescluster *v1beta1.PostgresCluster, pvc *corev1.PersistentVolumeClaim) error { + + // If VolumeSnapshots feature gate is disabled. Do nothing and return early. + if !feature.Enabled(ctx, feature.VolumeSnapshots) { + return nil + } + + // Check if the Kube cluster has VolumeSnapshots installed. If VolumeSnapshots + // are not installed, we need to return early. If user is attempting to use + // VolumeSnapshots, return an error, otherwise return nil. + volumeSnapshotKindExists, err := r.GroupVersionKindExists("snapshot.storage.k8s.io/v1", "VolumeSnapshot") + if err != nil { + return err + } + if !*volumeSnapshotKindExists { + if postgrescluster.Spec.Backups.Snapshots != nil { + return errors.New("VolumeSnapshots are not installed/enabled in this Kubernetes cluster; cannot create snapshot.") + } else { + return nil + } + } + + // If user is attempting to use snapshots and has tablespaces enabled, we + // need to create a warning event indicating that the two features are not + // currently compatible and return early. + if postgrescluster.Spec.Backups.Snapshots != nil && + clusterUsingTablespaces(ctx, postgrescluster) { + r.Recorder.Event(postgrescluster, corev1.EventTypeWarning, "IncompatibleFeatures", + "VolumeSnapshots not currently compatible with TablespaceVolumes; cannot create snapshot.") + return nil + } + + // Get all snapshots for the cluster. + snapshots, err := r.getSnapshotsForCluster(ctx, postgrescluster) + if err != nil { + return err + } + + // If snapshots are disabled, delete any existing snapshots and return early. + if postgrescluster.Spec.Backups.Snapshots == nil { + return r.deleteSnapshots(ctx, postgrescluster, snapshots) + } + + // If we got here, then the snapshots are enabled (feature gate is enabled and the + // cluster has a Spec.Backups.Snapshots section defined). + + // Check snapshots for errors; if present, create an event. If there are + // multiple snapshots with errors, create event for the latest error and + // delete any older snapshots with error. 
+ snapshotWithLatestError := getSnapshotWithLatestError(snapshots) + if snapshotWithLatestError != nil { + r.Recorder.Event(postgrescluster, corev1.EventTypeWarning, "VolumeSnapshotError", + *snapshotWithLatestError.Status.Error.Message) + for _, snapshot := range snapshots.Items { + if snapshot.Status != nil && snapshot.Status.Error != nil && + snapshot.Status.Error.Time.Before(snapshotWithLatestError.Status.Error.Time) { + err = r.deleteControlled(ctx, postgrescluster, &snapshot) + if err != nil { + return err + } + } + } + } + + // Get pvc backup job completion annotation. If it does not exist, there has not been + // a successful restore yet, so return early. + pvcUpdateTimeStamp, pvcAnnotationExists := pvc.GetAnnotations()[naming.PGBackRestBackupJobCompletion] + if !pvcAnnotationExists { + return err + } + + // Check to see if snapshot exists for the latest backup that has been restored into + // the dedicated pvc. + var snapshotForPvcUpdateIdx int + snapshotFoundForPvcUpdate := false + for idx, snapshot := range snapshots.Items { + if snapshot.GetAnnotations()[naming.PGBackRestBackupJobCompletion] == pvcUpdateTimeStamp { + snapshotForPvcUpdateIdx = idx + snapshotFoundForPvcUpdate = true + } + } + + // If a snapshot exists for the latest backup that has been restored into the dedicated pvc + // and the snapshot is Ready, delete all other snapshots. + if snapshotFoundForPvcUpdate && snapshots.Items[snapshotForPvcUpdateIdx].Status.ReadyToUse != nil && + *snapshots.Items[snapshotForPvcUpdateIdx].Status.ReadyToUse { + for idx, snapshot := range snapshots.Items { + if idx != snapshotForPvcUpdateIdx { + err = r.deleteControlled(ctx, postgrescluster, &snapshot) + if err != nil { + return err + } + } + } + } + + // If a snapshot for the latest backup/restore does not exist, create a snapshot. + if !snapshotFoundForPvcUpdate { + var snapshot *volumesnapshotv1.VolumeSnapshot + snapshot, err = r.generateSnapshotOfDedicatedSnapshotVolume(postgrescluster, pvc) + if err == nil { + err = errors.WithStack(r.apply(ctx, snapshot)) + } + } + + return err +} + +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={get} +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,delete,patch} + +// reconcileDedicatedSnapshotVolume reconciles the PersistentVolumeClaim that holds a +// copy of the pgdata and is dedicated for clean snapshots of the database. It creates +// and manages the volume as well as the restore jobs that bring the volume data forward +// after a successful backup. +func (r *Reconciler) reconcileDedicatedSnapshotVolume( + ctx context.Context, cluster *v1beta1.PostgresCluster, + clusterVolumes []corev1.PersistentVolumeClaim, +) (*corev1.PersistentVolumeClaim, error) { + + // If VolumeSnapshots feature gate is disabled, do nothing and return early. + if !feature.Enabled(ctx, feature.VolumeSnapshots) { + return nil, nil + } + + // Set appropriate labels for dedicated snapshot volume + labelMap := map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleSnapshot, + naming.LabelData: naming.DataPostgres, + } + + // If volume already exists, use existing name. Otherwise, generate a name. 
+ var pvc *corev1.PersistentVolumeClaim + existingPVCName, err := getPGPVCName(labelMap, clusterVolumes) + if err != nil { + return nil, errors.WithStack(err) + } + if existingPVCName != "" { + pvc = &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{ + Namespace: cluster.GetNamespace(), + Name: existingPVCName, + }} + } else { + pvc = &corev1.PersistentVolumeClaim{ObjectMeta: naming.ClusterDedicatedSnapshotVolume(cluster)} + } + pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim")) + + // If snapshots are disabled, delete the PVC if it exists and return early. + // Check the client cache first using Get. + if cluster.Spec.Backups.Snapshots == nil { + key := client.ObjectKeyFromObject(pvc) + err := errors.WithStack(r.Client.Get(ctx, key, pvc)) + if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, pvc)) + } + return nil, client.IgnoreNotFound(err) + } + + // If we've got this far, snapshots are enabled so we should create/update/get + // the dedicated snapshot volume + pvc, err = r.createDedicatedSnapshotVolume(ctx, cluster, labelMap, pvc) + if err != nil { + return pvc, err + } + + // Determine if we need to run a restore job, based on the most recent backup + // and an annotation on the PVC. + + // Find the most recently completed backup job. + backupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + if err != nil { + return pvc, err + } + + // Return early if no complete backup job is found. + if backupJob == nil { + return pvc, nil + } + + // Return early if the pvc is annotated with a timestamp newer or equal to the latest backup job. + // If the annotation value cannot be parsed, we want to proceed with a restore. + pvcAnnotationTimestampString := pvc.GetAnnotations()[naming.PGBackRestBackupJobCompletion] + if pvcAnnotationTime, err := time.Parse(time.RFC3339, pvcAnnotationTimestampString); err == nil { + if backupJob.Status.CompletionTime.Compare(pvcAnnotationTime) <= 0 { + return pvc, nil + } + } + + // If we've made it here, the pvc has not been restored with latest backup. + // Find the dedicated snapshot volume restore job if it exists. Since we delete + // successful restores after we annotate the PVC and stop making restore jobs + // if a failed DSV restore job exists, there should only ever be one DSV restore + // job in existence at a time. + // TODO(snapshots): Should this function throw an error or something if multiple + // DSV restores somehow exist? + restoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) + if err != nil { + return pvc, err + } + + // If we don't find a restore job, we run one. + if restoreJob == nil { + err = r.dedicatedSnapshotVolumeRestore(ctx, cluster, pvc, backupJob) + return pvc, err + } + + // If we've made it here, we have found a restore job. If the restore job was + // successful, set/update the annotation on the PVC and delete the restore job. 
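// Illustrative sketch (not part of this change): the early return in
// reconcileDedicatedSnapshotVolume that skips a restore when the dedicated
// snapshot PVC is already caught up is a plain RFC 3339 parse-and-compare.
// A self-contained form of that check, using the hypothetical name pvcIsCurrent
// and relying on the time import already present in this file:
func pvcIsCurrent(pvcAnnotation string, backupCompleted time.Time) bool {
	parsed, err := time.Parse(time.RFC3339, pvcAnnotation)
	// A missing or unparsable annotation cannot prove the PVC is current,
	// so the caller proceeds with a restore.
	return err == nil && !backupCompleted.After(parsed)
}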
+ if restoreJob.Status.Succeeded == 1 { + if pvc.GetAnnotations() == nil { + pvc.Annotations = map[string]string{} + } + pvc.Annotations[naming.PGBackRestBackupJobCompletion] = restoreJob.GetAnnotations()[naming.PGBackRestBackupJobCompletion] + annotations := fmt.Sprintf(`{"metadata":{"annotations":{"%s": "%s"}}}`, + naming.PGBackRestBackupJobCompletion, pvc.Annotations[naming.PGBackRestBackupJobCompletion]) + + patch := client.RawPatch(client.Merge.Type(), []byte(annotations)) + err = r.handlePersistentVolumeClaimError(cluster, + errors.WithStack(r.patch(ctx, pvc, patch))) + + if err != nil { + return pvc, err + } + + err = r.Client.Delete(ctx, restoreJob, client.PropagationPolicy(metav1.DeletePropagationBackground)) + return pvc, errors.WithStack(err) + } + + // If the restore job failed, create a warning event. + if restoreJob.Status.Failed == 1 { + r.Recorder.Event(cluster, corev1.EventTypeWarning, + "DedicatedSnapshotVolumeRestoreJobError", "restore job failed, check the logs") + return pvc, nil + } + + // If we made it here, the restore job is still running and we should do nothing. + return pvc, err +} + +// createDedicatedSnapshotVolume creates/updates/gets the dedicated snapshot volume. +// It expects that the volume name and GVK has already been set on the pvc that is passed in. +func (r *Reconciler) createDedicatedSnapshotVolume(ctx context.Context, + cluster *v1beta1.PostgresCluster, labelMap map[string]string, + pvc *corev1.PersistentVolumeClaim, +) (*corev1.PersistentVolumeClaim, error) { + var err error + + // An InstanceSet must be chosen to scale resources for the dedicated snapshot volume. + // TODO: We've chosen the first InstanceSet for the time being, but might want to consider + // making the choice configurable. + instanceSpec := cluster.Spec.InstanceSets[0] + + pvc.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + instanceSpec.Metadata.GetAnnotationsOrNil()) + + pvc.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + instanceSpec.Metadata.GetLabelsOrNil(), + labelMap, + ) + + err = errors.WithStack(r.setControllerReference(cluster, pvc)) + if err != nil { + return pvc, err + } + + pvc.Spec = instanceSpec.DataVolumeClaimSpec + + // Set the snapshot volume to the same size as the pgdata volume. The size should scale with auto-grow. + r.setVolumeSize(ctx, cluster, pvc, instanceSpec.Name) + + // Clear any set limit before applying PVC. This is needed to allow the limit + // value to change later. + pvc.Spec.Resources.Limits = nil + + err = r.handlePersistentVolumeClaimError(cluster, + errors.WithStack(r.apply(ctx, pvc))) + if err != nil { + return pvc, err + } + + return pvc, err +} + +// dedicatedSnapshotVolumeRestore creates a Job that performs a restore into the dedicated +// snapshot volume. +// This function is very similar to reconcileRestoreJob, but specifically tailored to the +// dedicated snapshot volume. 
+func (r *Reconciler) dedicatedSnapshotVolumeRestore(ctx context.Context, + cluster *v1beta1.PostgresCluster, dedicatedSnapshotVolume *corev1.PersistentVolumeClaim, + backupJob *batchv1.Job, +) error { + + pgdata := postgres.DataDirectory(cluster) + repoName := backupJob.GetLabels()[naming.LabelPGBackRestRepo] + + opts := []string{ + "--stanza=" + pgbackrest.DefaultStanzaName, + "--pg1-path=" + pgdata, + "--repo=" + regexRepoIndex.FindString(repoName), + "--delta", + } + + cmd := pgbackrest.DedicatedSnapshotVolumeRestoreCommand(pgdata, strings.Join(opts, " ")) + + // Create the volume resources required for the Postgres data directory. + dataVolumeMount := postgres.DataVolumeMount() + dataVolume := corev1.Volume{ + Name: dataVolumeMount.Name, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: dedicatedSnapshotVolume.GetName(), + }, + }, + } + volumes := []corev1.Volume{dataVolume} + volumeMounts := []corev1.VolumeMount{dataVolumeMount} + + _, configHash, err := pgbackrest.CalculateConfigHashes(cluster) + if err != nil { + return err + } + + // A DataSource is required to avoid a nil pointer exception. + fakeDataSource := &v1beta1.PostgresClusterDataSource{RepoName: ""} + + restoreJob := &batchv1.Job{} + instanceName := cluster.Status.StartupInstance + + if err := r.generateRestoreJobIntent(cluster, configHash, instanceName, cmd, + volumeMounts, volumes, fakeDataSource, restoreJob); err != nil { + return errors.WithStack(err) + } + + // Attempt the restore exactly once. If the restore job fails, we prompt the user to investigate. + restoreJob.Spec.BackoffLimit = initialize.Int32(0) + restoreJob.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever + + // Add pgBackRest configs to template. + pgbackrest.AddConfigToRestorePod(cluster, cluster, &restoreJob.Spec.Template.Spec) + + // Add nss_wrapper init container and add nss_wrapper env vars to the pgbackrest restore container. + addNSSWrapper( + config.PGBackRestContainerImage(cluster), + cluster.Spec.ImagePullPolicy, + &restoreJob.Spec.Template) + + addTMPEmptyDir(&restoreJob.Spec.Template) + + restoreJob.Annotations[naming.PGBackRestBackupJobCompletion] = backupJob.Status.CompletionTime.Format(time.RFC3339) + return errors.WithStack(r.apply(ctx, restoreJob)) +} + +// generateSnapshotOfDedicatedSnapshotVolume will generate a VolumeSnapshot of +// the dedicated snapshot PersistentVolumeClaim and annotate it with the +// provided backup job's UID. +func (r *Reconciler) generateSnapshotOfDedicatedSnapshotVolume( + postgrescluster *v1beta1.PostgresCluster, + dedicatedSnapshotVolume *corev1.PersistentVolumeClaim, +) (*volumesnapshotv1.VolumeSnapshot, error) { + + snapshot, err := r.generateVolumeSnapshot(postgrescluster, *dedicatedSnapshotVolume, + postgrescluster.Spec.Backups.Snapshots.VolumeSnapshotClassName) + if err == nil { + if snapshot.Annotations == nil { + snapshot.Annotations = map[string]string{} + } + snapshot.Annotations[naming.PGBackRestBackupJobCompletion] = dedicatedSnapshotVolume.GetAnnotations()[naming.PGBackRestBackupJobCompletion] + } + + return snapshot, err +} + +// generateVolumeSnapshot generates a VolumeSnapshot that will use the supplied +// PersistentVolumeClaim and VolumeSnapshotClassName and will set the provided +// PostgresCluster as the owner. 
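// Illustrative sketch (not part of this change): dedicatedSnapshotVolumeRestore
// joins its pgBackRest options into a single argument string before passing them
// to DedicatedSnapshotVolumeRestoreCommand. Assuming the default stanza "db", a
// data directory of /pgdata/pg16, and a backup job labeled for "repo1", the
// joined options would read roughly as shown; the concrete values come from the
// cluster spec and the backup job's labels.
func exampleRestoreOptions() string {
	opts := []string{"--stanza=db", "--pg1-path=/pgdata/pg16", "--repo=1", "--delta"}
	return strings.Join(opts, " ") // "--stanza=db --pg1-path=/pgdata/pg16 --repo=1 --delta"
}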
+func (r *Reconciler) generateVolumeSnapshot(postgrescluster *v1beta1.PostgresCluster, + pvc corev1.PersistentVolumeClaim, volumeSnapshotClassName string, +) (*volumesnapshotv1.VolumeSnapshot, error) { + + snapshot := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: naming.ClusterVolumeSnapshot(postgrescluster), + } + snapshot.Spec.Source.PersistentVolumeClaimName = &pvc.Name + snapshot.Spec.VolumeSnapshotClassName = &volumeSnapshotClassName + + snapshot.Annotations = postgrescluster.Spec.Metadata.GetAnnotationsOrNil() + snapshot.Labels = naming.Merge(postgrescluster.Spec.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: postgrescluster.Name, + }) + + err := errors.WithStack(r.setControllerReference(postgrescluster, snapshot)) + + return snapshot, err +} + +// getDedicatedSnapshotVolumeRestoreJob finds a dedicated snapshot volume (DSV) +// restore job if one exists. Since we delete successful restore jobs and stop +// creating new restore jobs when one fails, there should only ever be one DSV +// restore job present at a time. If a DSV restore cannot be found, we return nil. +func (r *Reconciler) getDedicatedSnapshotVolumeRestoreJob(ctx context.Context, + postgrescluster *v1beta1.PostgresCluster) (*batchv1.Job, error) { + + // Get all restore jobs for this cluster + jobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(postgrescluster.Name)) + if err == nil { + err = errors.WithStack( + r.Client.List(ctx, jobs, + client.InNamespace(postgrescluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + } + if err != nil { + return nil, err + } + + // Get restore job that has PGBackRestBackupJobCompletion annotation + for _, job := range jobs.Items { + _, annotationExists := job.GetAnnotations()[naming.PGBackRestBackupJobCompletion] + if annotationExists { + return &job, nil + } + } + + return nil, nil +} + +// getLatestCompleteBackupJob finds the most recently completed +// backup job for a cluster +func (r *Reconciler) getLatestCompleteBackupJob(ctx context.Context, + postgrescluster *v1beta1.PostgresCluster) (*batchv1.Job, error) { + + // Get all backup jobs for this cluster + jobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterBackupJobs(postgrescluster.Name)) + if err == nil { + err = errors.WithStack( + r.Client.List(ctx, jobs, + client.InNamespace(postgrescluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + } + if err != nil { + return nil, err + } + + zeroTime := metav1.NewTime(time.Time{}) + latestCompleteBackupJob := batchv1.Job{ + Status: batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &zeroTime, + }, + } + for _, job := range jobs.Items { + if job.Status.Succeeded > 0 && + latestCompleteBackupJob.Status.CompletionTime.Before(job.Status.CompletionTime) { + latestCompleteBackupJob = job + } + } + + if latestCompleteBackupJob.Status.CompletionTime.Equal(&zeroTime) { + return nil, nil + } + + return &latestCompleteBackupJob, nil +} + +// getSnapshotWithLatestError takes a VolumeSnapshotList and returns a pointer to the +// snapshot that has most recently had an error. If no snapshot errors exist +// then it returns nil. 
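// Illustrative sketch (not part of this change): getLatestCompleteBackupJob
// above and the snapshot helpers that follow share one pattern: seed a candidate
// with a zero metav1.Time, keep whichever item carries the later timestamp, and
// treat a still-zero result as "nothing found". Reduced to its core (the name
// latestNonZeroTime is hypothetical; metav1 and time are already imported here):
func latestNonZeroTime(times []metav1.Time) *metav1.Time {
	latest := metav1.NewTime(time.Time{})
	for i := range times {
		if latest.Before(&times[i]) {
			latest = times[i]
		}
	}
	if latest.IsZero() {
		return nil // no item had a non-zero timestamp
	}
	return &latest
}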
+func getSnapshotWithLatestError(snapshots *volumesnapshotv1.VolumeSnapshotList) *volumesnapshotv1.VolumeSnapshot { + zeroTime := metav1.NewTime(time.Time{}) + snapshotWithLatestError := volumesnapshotv1.VolumeSnapshot{ + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &zeroTime, + }, + }, + } + for _, snapshot := range snapshots.Items { + if snapshot.Status != nil && snapshot.Status.Error != nil && + snapshotWithLatestError.Status.Error.Time.Before(snapshot.Status.Error.Time) { + snapshotWithLatestError = snapshot + } + } + + if snapshotWithLatestError.Status.Error.Time.Equal(&zeroTime) { + return nil + } + + return &snapshotWithLatestError +} + +// getSnapshotsForCluster gets all the VolumeSnapshots for a given postgrescluster. +func (r *Reconciler) getSnapshotsForCluster(ctx context.Context, cluster *v1beta1.PostgresCluster) ( + *volumesnapshotv1.VolumeSnapshotList, error) { + + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + if err != nil { + return nil, err + } + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + + return snapshots, err +} + +// getLatestReadySnapshot takes a VolumeSnapshotList and returns the latest ready VolumeSnapshot. +func getLatestReadySnapshot(snapshots *volumesnapshotv1.VolumeSnapshotList) *volumesnapshotv1.VolumeSnapshot { + zeroTime := metav1.NewTime(time.Time{}) + latestReadySnapshot := volumesnapshotv1.VolumeSnapshot{ + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &zeroTime, + }, + } + for _, snapshot := range snapshots.Items { + if snapshot.Status != nil && snapshot.Status.ReadyToUse != nil && *snapshot.Status.ReadyToUse && + latestReadySnapshot.Status.CreationTime.Before(snapshot.Status.CreationTime) { + latestReadySnapshot = snapshot + } + } + + if latestReadySnapshot.Status.CreationTime.Equal(&zeroTime) { + return nil + } + + return &latestReadySnapshot +} + +// deleteSnapshots takes a postgrescluster and a snapshot list and deletes all snapshots +// in the list that are controlled by the provided postgrescluster. +func (r *Reconciler) deleteSnapshots(ctx context.Context, + postgrescluster *v1beta1.PostgresCluster, snapshots *volumesnapshotv1.VolumeSnapshotList) error { + + for i := range snapshots.Items { + err := errors.WithStack(client.IgnoreNotFound( + r.deleteControlled(ctx, postgrescluster, &snapshots.Items[i]))) + if err != nil { + return err + } + } + return nil +} + +// tablespaceVolumesInUse determines if the TablespaceVolumes feature is enabled and the given +// cluster has tablespace volumes in place. +func clusterUsingTablespaces(ctx context.Context, postgrescluster *v1beta1.PostgresCluster) bool { + for _, instanceSet := range postgrescluster.Spec.InstanceSets { + if len(instanceSet.TablespaceVolumes) > 0 { + return feature.Enabled(ctx, feature.TablespaceVolumes) + } + } + return false +} diff --git a/internal/controller/postgrescluster/snapshots_test.go b/internal/controller/postgrescluster/snapshots_test.go new file mode 100644 index 0000000000..4c3d987ecd --- /dev/null +++ b/internal/controller/postgrescluster/snapshots_test.go @@ -0,0 +1,1476 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package postgrescluster + +import ( + "context" + "testing" + "time" + + "github.com/pkg/errors" + "gotest.tools/v3/assert" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/events" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" + + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" +) + +func TestReconcileVolumeSnapshots(t *testing.T) { + ctx := context.Background() + cfg, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) + assert.NilError(t, err) + + recorder := events.NewRecorder(t, runtime.Scheme) + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + DiscoveryClient: discoveryClient, + Recorder: recorder, + } + ns := setupNamespace(t, cc) + + // Enable snapshots feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.VolumeSnapshots: true, + })) + ctx = feature.NewContext(ctx, gate) + + t.Run("SnapshotsDisabledDeleteSnapshots", func(t *testing.T) { + // Create cluster (without snapshots spec) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create a snapshot + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + volumeSnapshotClassName := "my-snapshotclass" + snapshot, err := r.generateVolumeSnapshot(cluster, *pvc, volumeSnapshotClassName) + assert.NilError(t, err) + err = errors.WithStack(r.apply(ctx, snapshot)) + assert.NilError(t, err) + + // Get all snapshots for this cluster and assert 1 exists + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 1) + + // Reconcile snapshots + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Get all snapshots for this cluster and assert 0 exist + assert.NilError(t, err) + snapshots = &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 0) + }) + + t.Run("SnapshotsEnabledTablespacesEnabled", func(t *testing.T) { + // Enable both tablespaces and 
snapshots feature gates + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + feature.VolumeSnapshots: true, + })) + ctx := feature.NewContext(ctx, gate) + + // Create a cluster with snapshots and tablespaces enabled + volumeSnapshotClassName := "my-snapshotclass" + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: volumeSnapshotClassName, + } + cluster.Spec.InstanceSets[0].TablespaceVolumes = []v1beta1.TablespaceVolume{{ + Name: "volume-1", + }} + + // Create pvc for reconcile + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + + // Reconcile + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Assert warning event was created and has expected attributes + if assert.Check(t, len(recorder.Events) > 0) { + assert.Equal(t, recorder.Events[0].Type, "Warning") + assert.Equal(t, recorder.Events[0].Regarding.Kind, "PostgresCluster") + assert.Equal(t, recorder.Events[0].Regarding.Name, "hippo") + assert.Equal(t, recorder.Events[0].Reason, "IncompatibleFeatures") + assert.Assert(t, cmp.Contains(recorder.Events[0].Note, "VolumeSnapshots not currently compatible with TablespaceVolumes")) + } + }) + + t.Run("SnapshotsEnabledNoPvcAnnotation", func(t *testing.T) { + // Create a volume snapshot class + volumeSnapshotClassName := "my-snapshotclass" + volumeSnapshotClass := &volumesnapshotv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeSnapshotClassName, + }, + DeletionPolicy: "Delete", + } + assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + + // Create a cluster with snapshots enabled + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: volumeSnapshotClassName, + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create pvc for reconcile + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + + // Reconcile + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Assert no snapshots exist + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 0) + }) + + t.Run("SnapshotsEnabledReadySnapshotsExist", func(t *testing.T) { + // Create a volume snapshot class + volumeSnapshotClassName := "my-snapshotclass" + volumeSnapshotClass := &volumesnapshotv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeSnapshotClassName, + }, + DeletionPolicy: "Delete", + } + assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + + // Create a cluster with snapshots enabled + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: 
volumeSnapshotClassName, + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create pvc with annotation + pvcName := initialize.String("dedicated-snapshot-volume") + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: *pvcName, + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "backup-timestamp", + }, + }, + } + + // Create snapshot with annotation matching the pvc annotation + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "first-snapshot", + Namespace: ns.Name, + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "backup-timestamp", + }, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err := errors.WithStack(r.setControllerReference(cluster, snapshot1)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot1) + assert.NilError(t, err) + + // Update snapshot status + truePtr := initialize.Bool(true) + snapshot1.Status = &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: truePtr, + } + err = r.Client.Status().Update(ctx, snapshot1) + assert.NilError(t, err) + + // Create second snapshot with different annotation value + snapshot2 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "second-snapshot", + Namespace: ns.Name, + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "older-backup-timestamp", + }, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err = errors.WithStack(r.setControllerReference(cluster, snapshot2)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot2) + assert.NilError(t, err) + + // Update second snapshot's status + snapshot2.Status = &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: truePtr, + } + err = r.Client.Status().Update(ctx, snapshot2) + assert.NilError(t, err) + + // Reconcile + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Assert first snapshot exists and second snapshot was deleted + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 1) + assert.Equal(t, snapshots.Items[0].Name, "first-snapshot") + + // Cleanup + err = r.deleteControlled(ctx, cluster, snapshot1) + assert.NilError(t, err) + }) + + t.Run("SnapshotsEnabledCreateSnapshot", func(t *testing.T) { + // Create a volume snapshot class + volumeSnapshotClassName := "my-snapshotclass" + volumeSnapshotClass := &volumesnapshotv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeSnapshotClassName, + }, + DeletionPolicy: "Delete", + } + assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) + 
t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + + // Create a cluster with snapshots enabled + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: volumeSnapshotClassName, + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create pvc with annotation + pvcName := initialize.String("dedicated-snapshot-volume") + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: *pvcName, + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "another-backup-timestamp", + }, + }, + } + + // Reconcile + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Assert that a snapshot was created + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 1) + assert.Equal(t, snapshots.Items[0].Annotations[naming.PGBackRestBackupJobCompletion], + "another-backup-timestamp") + }) +} + +func TestReconcileDedicatedSnapshotVolume(t *testing.T) { + ctx := context.Background() + cfg, cc := setupKubernetes(t) + discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) + assert.NilError(t, err) + + recorder := events.NewRecorder(t, runtime.Scheme) + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + DiscoveryClient: discoveryClient, + Recorder: recorder, + } + + // Enable snapshots feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.VolumeSnapshots: true, + })) + ctx = feature.NewContext(ctx, gate) + + t.Run("SnapshotsDisabledDeletePvc", func(t *testing.T) { + // Create cluster without snapshots spec + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create a dedicated snapshot volume + pvc := &corev1.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + Kind: "PersistentVolumeClaim", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleSnapshot, + naming.LabelData: naming.DataPostgres, + }, + }, + Spec: testVolumeClaimSpec(), + } + err = errors.WithStack(r.setControllerReference(cluster, pvc)) + assert.NilError(t, err) + err = r.apply(ctx, pvc) + assert.NilError(t, err) + + // Assert that the pvc was created + selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + pvcs := &corev1.PersistentVolumeClaimList{} + err = errors.WithStack( + r.Client.List(ctx, pvcs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectPvcs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(pvcs.Items), 1) + + // Create volumes for reconcile + clusterVolumes := []corev1.PersistentVolumeClaim{*pvc} + + // Reconcile + returned, err := 
r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Check(t, returned == nil) + + // Assert that the pvc has been deleted or marked for deletion + key, fetched := client.ObjectKeyFromObject(pvc), &corev1.PersistentVolumeClaim{} + if err := r.Client.Get(ctx, key, fetched); err == nil { + assert.Assert(t, fetched.DeletionTimestamp != nil, "expected deleted") + } else { + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %v", err) + } + }) + + t.Run("SnapshotsEnabledCreatePvcNoBackupNoRestore", func(t *testing.T) { + // Create cluster with snapshots enabled + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshotclass", + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create volumes for reconcile + clusterVolumes := []corev1.PersistentVolumeClaim{} + + // Reconcile + pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Assert(t, pvc != nil) + + // Assert pvc was created + selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + pvcs := &corev1.PersistentVolumeClaimList{} + err = errors.WithStack( + r.Client.List(ctx, pvcs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectPvcs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(pvcs.Items), 1) + }) + + t.Run("SnapshotsEnabledBackupExistsCreateRestore", func(t *testing.T) { + // Create cluster with snapshots enabled + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshotclass", + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create successful backup job + backupJob := testBackupJob(cluster) + err = errors.WithStack(r.setControllerReference(cluster, backupJob)) + assert.NilError(t, err) + err = r.apply(ctx, backupJob) + assert.NilError(t, err) + + currentTime := metav1.Now() + backupJob.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: ¤tTime, + } + err = r.Client.Status().Update(ctx, backupJob) + assert.NilError(t, err) + + // Create instance set and volumes for reconcile + sts := &appsv1.StatefulSet{} + generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) + clusterVolumes := []corev1.PersistentVolumeClaim{} + + // Reconcile + pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Assert(t, pvc != nil) + + // Assert restore job with annotation was created + restoreJobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) + assert.NilError(t, err) + err = errors.WithStack( + r.Client.List(ctx, restoreJobs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(restoreJobs.Items), 1) + assert.Assert(t, restoreJobs.Items[0].Annotations[naming.PGBackRestBackupJobCompletion] != "") + }) + + t.Run("SnapshotsEnabledSuccessfulRestoreExists", func(t *testing.T) { + // 
Create cluster with snapshots enabled + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshotclass", + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create times for jobs + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + + // Create successful backup job + backupJob := testBackupJob(cluster) + err = errors.WithStack(r.setControllerReference(cluster, backupJob)) + assert.NilError(t, err) + err = r.apply(ctx, backupJob) + assert.NilError(t, err) + + backupJob.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &earlierTime, + } + err = r.Client.Status().Update(ctx, backupJob) + assert.NilError(t, err) + + // Create successful restore job + restoreJob := testRestoreJob(cluster) + restoreJob.Annotations = map[string]string{ + naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), + } + err = errors.WithStack(r.setControllerReference(cluster, restoreJob)) + assert.NilError(t, err) + err = r.apply(ctx, restoreJob) + assert.NilError(t, err) + + restoreJob.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: ¤tTime, + } + err = r.Client.Status().Update(ctx, restoreJob) + assert.NilError(t, err) + + // Create instance set and volumes for reconcile + sts := &appsv1.StatefulSet{} + generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) + clusterVolumes := []corev1.PersistentVolumeClaim{} + + // Reconcile + pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Assert(t, pvc != nil) + + // Assert restore job was deleted + restoreJobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) + assert.NilError(t, err) + err = errors.WithStack( + r.Client.List(ctx, restoreJobs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(restoreJobs.Items), 0) + + // Assert pvc was annotated + assert.Equal(t, pvc.GetAnnotations()[naming.PGBackRestBackupJobCompletion], backupJob.Status.CompletionTime.Format(time.RFC3339)) + }) + + t.Run("SnapshotsEnabledFailedRestoreExists", func(t *testing.T) { + // Create cluster with snapshots enabled + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshotclass", + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create times for jobs + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + + // Create successful backup job + backupJob := testBackupJob(cluster) + err = errors.WithStack(r.setControllerReference(cluster, backupJob)) + assert.NilError(t, err) + err = r.apply(ctx, backupJob) + assert.NilError(t, err) + + backupJob.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &earlierTime, + } + err = r.Client.Status().Update(ctx, backupJob) + assert.NilError(t, err) + + // Create failed restore job + restoreJob := testRestoreJob(cluster) + restoreJob.Annotations = 
map[string]string{ + naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), + } + err = errors.WithStack(r.setControllerReference(cluster, restoreJob)) + assert.NilError(t, err) + err = r.apply(ctx, restoreJob) + assert.NilError(t, err) + + restoreJob.Status = batchv1.JobStatus{ + Succeeded: 0, + Failed: 1, + CompletionTime: ¤tTime, + } + err = r.Client.Status().Update(ctx, restoreJob) + assert.NilError(t, err) + + // Setup instances and volumes for reconcile + sts := &appsv1.StatefulSet{} + generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) + clusterVolumes := []corev1.PersistentVolumeClaim{} + + // Reconcile + pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Assert(t, pvc != nil) + + // Assert warning event was created and has expected attributes + if assert.Check(t, len(recorder.Events) > 0) { + assert.Equal(t, recorder.Events[0].Type, "Warning") + assert.Equal(t, recorder.Events[0].Regarding.Kind, "PostgresCluster") + assert.Equal(t, recorder.Events[0].Regarding.Name, "hippo") + assert.Equal(t, recorder.Events[0].Reason, "DedicatedSnapshotVolumeRestoreJobError") + assert.Assert(t, cmp.Contains(recorder.Events[0].Note, "restore job failed, check the logs")) + } + }) +} + +func TestCreateDedicatedSnapshotVolume(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + + labelMap := map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleSnapshot, + naming.LabelData: naming.DataPostgres, + } + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.ClusterDedicatedSnapshotVolume(cluster)} + pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim")) + + pvc, err := r.createDedicatedSnapshotVolume(ctx, cluster, labelMap, pvc) + assert.NilError(t, err) + assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) + assert.Equal(t, pvc.Spec.Resources.Requests[corev1.ResourceStorage], resource.MustParse("1Gi")) +} + +func TestDedicatedSnapshotVolumeRestore(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + + sts := &appsv1.StatefulSet{} + generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) + currentTime := metav1.Now() + backupJob := testBackupJob(cluster) + backupJob.Status.CompletionTime = ¤tTime + + err := r.dedicatedSnapshotVolumeRestore(ctx, cluster, pvc, backupJob) + assert.NilError(t, err) + + // Assert a restore job was created that has the correct annotation + jobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) + assert.NilError(t, err) + err = errors.WithStack( + r.Client.List(ctx, jobs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(jobs.Items), 1) + assert.Equal(t, 
jobs.Items[0].Annotations[naming.PGBackRestBackupJobCompletion], + backupJob.Status.CompletionTime.Format(time.RFC3339)) +} + +func TestGenerateSnapshotOfDedicatedSnapshotVolume(t *testing.T) { + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshot", + } + + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "backup-completion-timestamp", + }, + Name: "dedicated-snapshot-volume", + }, + } + + snapshot, err := r.generateSnapshotOfDedicatedSnapshotVolume(cluster, pvc) + assert.NilError(t, err) + assert.Equal(t, snapshot.GetAnnotations()[naming.PGBackRestBackupJobCompletion], + "backup-completion-timestamp") +} + +func TestGenerateVolumeSnapshot(t *testing.T) { + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + volumeSnapshotClassName := "my-snapshot" + + snapshot, err := r.generateVolumeSnapshot(cluster, *pvc, volumeSnapshotClassName) + assert.NilError(t, err) + assert.Equal(t, *snapshot.Spec.VolumeSnapshotClassName, "my-snapshot") + assert.Equal(t, *snapshot.Spec.Source.PersistentVolumeClaimName, "dedicated-snapshot-volume") + assert.Equal(t, snapshot.Labels[naming.LabelCluster], "hippo") + assert.Equal(t, snapshot.ObjectMeta.OwnerReferences[0].Name, "hippo") +} + +func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + + t.Run("NoRestoreJobs", func(t *testing.T) { + dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, dsvRestoreJob == nil) + }) + + t.Run("NoDsvRestoreJobs", func(t *testing.T) { + job1 := testRestoreJob(cluster) + job1.Namespace = ns.Name + + err := r.apply(ctx, job1) + assert.NilError(t, err) + + dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, dsvRestoreJob == nil) + }) + + t.Run("DsvRestoreJobExists", func(t *testing.T) { + job2 := testRestoreJob(cluster) + job2.Name = "restore-job-2" + job2.Namespace = ns.Name + job2.Annotations = map[string]string{ + naming.PGBackRestBackupJobCompletion: "backup-timestamp", + } + + err := r.apply(ctx, job2) + assert.NilError(t, err) + + job3 := testRestoreJob(cluster) + job3.Name = "restore-job-3" + job3.Namespace = ns.Name + + err = r.apply(ctx, job3) + assert.NilError(t, err) + + dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, dsvRestoreJob != nil) + assert.Equal(t, dsvRestoreJob.Name, "restore-job-2") + }) +} + +func TestGetLatestCompleteBackupJob(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + // require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: 
client.FieldOwner(t.Name()), + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + + t.Run("NoJobs", func(t *testing.T) { + latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, latestCompleteBackupJob == nil) + }) + + t.Run("NoCompleteJobs", func(t *testing.T) { + job1 := testBackupJob(cluster) + job1.Namespace = ns.Name + + err := r.apply(ctx, job1) + assert.NilError(t, err) + + latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, latestCompleteBackupJob == nil) + }) + + t.Run("OneCompleteBackupJob", func(t *testing.T) { + currentTime := metav1.Now() + + job1 := testBackupJob(cluster) + job1.Namespace = ns.Name + + err := r.apply(ctx, job1) + assert.NilError(t, err) + + job2 := testBackupJob(cluster) + job2.Namespace = ns.Name + job2.Name = "backup-job-2" + + err = r.apply(ctx, job2) + assert.NilError(t, err) + + // Get job1 and update Status. + err = r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1) + assert.NilError(t, err) + + job1.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: ¤tTime, + } + err = r.Client.Status().Update(ctx, job1) + assert.NilError(t, err) + + latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, latestCompleteBackupJob.Name == "backup-job-1") + }) + + t.Run("TwoCompleteBackupJobs", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + assert.Check(t, earlierTime.Before(¤tTime)) + + job1 := testBackupJob(cluster) + job1.Namespace = ns.Name + + err := r.apply(ctx, job1) + assert.NilError(t, err) + + job2 := testBackupJob(cluster) + job2.Namespace = ns.Name + job2.Name = "backup-job-2" + + err = r.apply(ctx, job2) + assert.NilError(t, err) + + // Get job1 and update Status. + err = r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1) + assert.NilError(t, err) + + job1.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: ¤tTime, + } + err = r.Client.Status().Update(ctx, job1) + assert.NilError(t, err) + + // Get job2 and update Status. 
+ err = r.Client.Get(ctx, client.ObjectKeyFromObject(job2), job2) + assert.NilError(t, err) + + job2.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &earlierTime, + } + err = r.Client.Status().Update(ctx, job2) + assert.NilError(t, err) + + latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, latestCompleteBackupJob.Name == "backup-job-1") + }) +} + +func TestGetSnapshotWithLatestError(t *testing.T) { + t.Run("NoSnapshots", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{} + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Check(t, snapshotWithLatestError == nil) + }) + + t.Run("NoSnapshotsWithStatus", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + {}, + {}, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Check(t, snapshotWithLatestError == nil) + }) + + t.Run("NoSnapshotsWithErrors", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(true), + }, + }, + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + }, + }, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Check(t, snapshotWithLatestError == nil) + }) + + t.Run("OneSnapshotWithError", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "good-snapshot", + UID: "the-uid-123", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: ¤tTime, + ReadyToUse: initialize.Bool(true), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "bad-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &earlierTime, + }, + }, + }, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Equal(t, snapshotWithLatestError.ObjectMeta.Name, "bad-snapshot") + }) + + t.Run("TwoSnapshotsWithErrors", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "first-bad-snapshot", + UID: "the-uid-123", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &earlierTime, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "second-bad-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: ¤tTime, + }, + }, + }, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Equal(t, snapshotWithLatestError.ObjectMeta.Name, "second-bad-snapshot") + }) +} + +func TestGetSnapshotsForCluster(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } 
+ ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + + t.Run("NoSnapshots", func(t *testing.T) { + snapshots, err := r.getSnapshotsForCluster(ctx, cluster) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 0) + }) + + t.Run("NoSnapshotsForCluster", func(t *testing.T) { + snapshot := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "rhino", + }, + }, + } + snapshot.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") + snapshot.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") + err := r.apply(ctx, snapshot) + assert.NilError(t, err) + + snapshots, err := r.getSnapshotsForCluster(ctx, cluster) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 0) + }) + + t.Run("OneSnapshotForCluster", func(t *testing.T) { + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "rhino", + }, + }, + } + snapshot1.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") + snapshot1.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") + err := r.apply(ctx, snapshot1) + assert.NilError(t, err) + + snapshot2 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "another-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + } + snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") + snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") + err = r.apply(ctx, snapshot2) + assert.NilError(t, err) + + snapshots, err := r.getSnapshotsForCluster(ctx, cluster) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 1) + assert.Equal(t, snapshots.Items[0].Name, "another-snapshot") + }) + + t.Run("TwoSnapshotsForCluster", func(t *testing.T) { + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + } + snapshot1.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") + snapshot1.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") + err := r.apply(ctx, snapshot1) + assert.NilError(t, err) + + snapshot2 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "another-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + } + snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") + snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") + err = r.apply(ctx, snapshot2) + assert.NilError(t, err) + + snapshots, err 
:= r.getSnapshotsForCluster(ctx, cluster) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 2) + }) +} + +func TestGetLatestReadySnapshot(t *testing.T) { + t.Run("NoSnapshots", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{} + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Assert(t, latestReadySnapshot == nil) + }) + + t.Run("NoSnapshotsWithStatus", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + {}, + {}, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Assert(t, latestReadySnapshot == nil) + }) + + t.Run("NoReadySnapshots", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + }, + }, + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + }, + }, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Assert(t, latestReadySnapshot == nil) + }) + + t.Run("OneReadySnapshot", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "good-snapshot", + UID: "the-uid-123", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &earlierTime, + ReadyToUse: initialize.Bool(true), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "bad-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: ¤tTime, + ReadyToUse: initialize.Bool(false), + }, + }, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Equal(t, latestReadySnapshot.ObjectMeta.Name, "good-snapshot") + }) + + t.Run("TwoReadySnapshots", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "first-good-snapshot", + UID: "the-uid-123", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &earlierTime, + ReadyToUse: initialize.Bool(true), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "second-good-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: ¤tTime, + ReadyToUse: initialize.Bool(true), + }, + }, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Equal(t, latestReadySnapshot.ObjectMeta.Name, "second-good-snapshot") + }) +} + +func TestDeleteSnapshots(t *testing.T) { + ctx := context.Background() + cfg, cc := setupKubernetes(t) + discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) + assert.NilError(t, err) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + DiscoveryClient: discoveryClient, + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + assert.NilError(t, r.Client.Create(ctx, cluster)) + + rhinoCluster := testCluster() + rhinoCluster.Name = "rhino" + rhinoCluster.Namespace = ns.Name + rhinoCluster.ObjectMeta.UID = "the-uid-456" + assert.NilError(t, r.Client.Create(ctx, rhinoCluster)) + + t.Cleanup(func() { + assert.Check(t, 
r.Client.Delete(ctx, cluster)) + assert.Check(t, r.Client.Delete(ctx, rhinoCluster)) + }) + + t.Run("NoSnapshots", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{} + err := r.deleteSnapshots(ctx, cluster, snapshotList) + assert.NilError(t, err) + }) + + t.Run("NoSnapshotsControlledByHippo", func(t *testing.T) { + pvcName := initialize.String("dedicated-snapshot-volume") + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "first-snapshot", + Namespace: ns.Name, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err := errors.WithStack(r.setControllerReference(rhinoCluster, snapshot1)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot1) + assert.NilError(t, err) + + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + *snapshot1, + }, + } + err = r.deleteSnapshots(ctx, cluster, snapshotList) + assert.NilError(t, err) + existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, existingSnapshots, + client.InNamespace(ns.Namespace), + )) + assert.NilError(t, err) + assert.Equal(t, len(existingSnapshots.Items), 1) + }) + + t.Run("OneSnapshotControlledByHippo", func(t *testing.T) { + pvcName := initialize.String("dedicated-snapshot-volume") + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "first-snapshot", + Namespace: ns.Name, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err := errors.WithStack(r.setControllerReference(rhinoCluster, snapshot1)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot1) + assert.NilError(t, err) + + snapshot2 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "second-snapshot", + Namespace: ns.Name, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err = errors.WithStack(r.setControllerReference(cluster, snapshot2)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot2) + assert.NilError(t, err) + + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + *snapshot1, *snapshot2, + }, + } + err = r.deleteSnapshots(ctx, cluster, snapshotList) + assert.NilError(t, err) + existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, existingSnapshots, + client.InNamespace(ns.Namespace), + )) + assert.NilError(t, err) + assert.Equal(t, len(existingSnapshots.Items), 1) + assert.Equal(t, existingSnapshots.Items[0].Name, "first-snapshot") + }) +} + +func TestClusterUsingTablespaces(t *testing.T) { + ctx := context.Background() + cluster := testCluster() + + t.Run("NoVolumesFeatureEnabled", func(t *testing.T) { + // Enable Tablespaces feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx := 
feature.NewContext(ctx, gate) + + assert.Assert(t, !clusterUsingTablespaces(ctx, cluster)) + }) + + t.Run("VolumesInPlaceFeatureDisabled", func(t *testing.T) { + cluster.Spec.InstanceSets[0].TablespaceVolumes = []v1beta1.TablespaceVolume{{ + Name: "volume-1", + }} + + assert.Assert(t, !clusterUsingTablespaces(ctx, cluster)) + }) + + t.Run("VolumesInPlaceAndFeatureEnabled", func(t *testing.T) { + // Enable Tablespaces feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx := feature.NewContext(ctx, gate) + + assert.Assert(t, clusterUsingTablespaces(ctx, cluster)) + }) +} diff --git a/internal/controller/postgrescluster/suite_test.go b/internal/controller/postgrescluster/suite_test.go index d0589e26bf..2a0e3d76ec 100644 --- a/internal/controller/postgrescluster/suite_test.go +++ b/internal/controller/postgrescluster/suite_test.go @@ -1,84 +1,84 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" + "os" "path/filepath" + "strings" "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes/scheme" + "k8s.io/apimachinery/pkg/util/version" + "k8s.io/client-go/discovery" + + // Google Kubernetes Engine / Google Cloud Platform authentication provider + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" - // Google Kubernetes Engine / Google Cloud Platform authentication provider - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) var suite struct { Client client.Client Config *rest.Config - Scheme *runtime.Scheme - Environment *envtest.Environment - Manager manager.Manager + Environment *envtest.Environment + ServerVersion *version.Version + + Manager manager.Manager } func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) + RunSpecs(t, "Controller Suite") } var _ = BeforeSuite(func() { - logging.SetLogFunc(1, logging.Logrus(GinkgoWriter, "test", 1)) + if os.Getenv("KUBEBUILDER_ASSETS") == "" && !strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + Skip("skipping") + } + + logging.SetLogSink(logging.Logrus(GinkgoWriter, "test", 1, 1)) log.SetLogger(logging.FromContext(context.Background())) By("bootstrapping test environment") suite.Environment = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "..", "config", "crd", "bases"), + filepath.Join("..", "..", "..", "hack", "tools", "external-snapshotter", "client", "config", "crd"), + }, } - suite.Scheme = runtime.NewScheme() - Expect(scheme.AddToScheme(suite.Scheme)).To(Succeed()) - Expect(v1beta1.AddToScheme(suite.Scheme)).To(Succeed()) - _, err := suite.Environment.Start() Expect(err).ToNot(HaveOccurred()) + DeferCleanup(suite.Environment.Stop) + suite.Config = suite.Environment.Config - suite.Client, err = client.New(suite.Config, client.Options{Scheme: suite.Scheme}) + suite.Client, err = client.New(suite.Config, client.Options{Scheme: runtime.Scheme}) Expect(err).ToNot(HaveOccurred()) -}, 60) + + dc, err := discovery.NewDiscoveryClientForConfig(suite.Config) + Expect(err).ToNot(HaveOccurred()) + + server, err := dc.ServerVersion() + Expect(err).ToNot(HaveOccurred()) + + suite.ServerVersion, err = version.ParseGeneric(server.GitVersion) + Expect(err).ToNot(HaveOccurred()) +}) var _ = AfterSuite(func() { - By("tearing down the test environment") - Expect(suite.Environment.Stop()).To(Succeed()) + }) diff --git a/internal/controller/postgrescluster/topology.go b/internal/controller/postgrescluster/topology.go new file mode 100644 index 0000000000..58778be907 --- /dev/null +++ b/internal/controller/postgrescluster/topology.go @@ -0,0 +1,27 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package postgrescluster + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// defaultTopologySpreadConstraints returns constraints that prefer to schedule +// pods on different nodes and in different zones. +func defaultTopologySpreadConstraints(selector metav1.LabelSelector) []corev1.TopologySpreadConstraint { + return []corev1.TopologySpreadConstraint{ + { + TopologyKey: corev1.LabelHostname, + WhenUnsatisfiable: corev1.ScheduleAnyway, + LabelSelector: &selector, MaxSkew: 1, + }, + { + TopologyKey: corev1.LabelTopologyZone, + WhenUnsatisfiable: corev1.ScheduleAnyway, + LabelSelector: &selector, MaxSkew: 1, + }, + } +} diff --git a/internal/controller/postgrescluster/topology_test.go b/internal/controller/postgrescluster/topology_test.go new file mode 100644 index 0000000000..40c8c0dd7f --- /dev/null +++ b/internal/controller/postgrescluster/topology_test.go @@ -0,0 +1,51 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package postgrescluster + +import ( + "testing" + + "gotest.tools/v3/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" +) + +func TestDefaultTopologySpreadConstraints(t *testing.T) { + constraints := defaultTopologySpreadConstraints(metav1.LabelSelector{ + MatchLabels: map[string]string{"basic": "stuff"}, + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: "k1", Operator: "op", Values: []string{"v1", "v2"}}, + }, + }) + + // Entire selector, hostname, zone, and ScheduleAnyway. + assert.Assert(t, cmp.MarshalMatches(constraints, ` +- labelSelector: + matchExpressions: + - key: k1 + operator: op + values: + - v1 + - v2 + matchLabels: + basic: stuff + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway +- labelSelector: + matchExpressions: + - key: k1 + operator: op + values: + - v1 + - v2 + matchLabels: + basic: stuff + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + `)) +} diff --git a/internal/controller/postgrescluster/util.go b/internal/controller/postgrescluster/util.go index 40582951d5..25120ab574 100644 --- a/internal/controller/postgrescluster/util.go +++ b/internal/controller/postgrescluster/util.go @@ -1,19 +1,8 @@ -package postgrescluster - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ +package postgrescluster import ( "fmt" @@ -21,10 +10,9 @@ import ( "io" batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/rand" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" @@ -33,27 +21,131 @@ import ( var tmpDirSizeLimit = resource.MustParse("16Mi") const ( + // devSHMDir is the directory used for allocating shared memory segments, + // which are needed by Postgres + devSHMDir = "/dev/shm" // nssWrapperDir is the directory in a container for the nss_wrapper passwd and group files nssWrapperDir = "/tmp/nss_wrapper/%s/%s" - // uidCommand is the command for setting up nss_wrapper in the container - nssWrapperCmd = `NSS_WRAPPER_SUBDIR=postgres CRUNCHY_NSS_USERNAME=postgres ` + - `CRUNCHY_NSS_USER_DESC="postgres" /opt/crunchy/bin/nss_wrapper.sh` + // postgresNSSWrapperPrefix sets the required variables when running the NSS + // wrapper script for the 'postgres' user + postgresNSSWrapperPrefix = `export NSS_WRAPPER_SUBDIR=postgres CRUNCHY_NSS_USERNAME=postgres ` + + `CRUNCHY_NSS_USER_DESC="postgres" ` + // pgAdminNSSWrapperPrefix sets the required variables when running the NSS + // wrapper script for the 'pgadmin' user + pgAdminNSSWrapperPrefix = `export NSS_WRAPPER_SUBDIR=pgadmin CRUNCHY_NSS_USERNAME=pgadmin ` + + `CRUNCHY_NSS_USER_DESC="pgadmin" ` + // nssWrapperScript sets up an nss_wrapper environment in accordance with OpenShift + // guidance for supporting arbitrary user ID's is the script for the configuration + // and startup of the pgAdmin service. + // It is based on the nss_wrapper.sh script from the Crunchy Containers Project. + // - https://github.com/CrunchyData/crunchy-containers/blob/master/bin/common/nss_wrapper.sh + nssWrapperScript = ` +# Define nss_wrapper directory and passwd & group files that will be utilized by nss_wrapper. The +# nss_wrapper_env.sh script (which also sets these vars) isn't sourced here since the nss_wrapper +# has not yet been setup, and we therefore don't yet want the nss_wrapper vars in the environment. +mkdir -p /tmp/nss_wrapper +chmod g+rwx /tmp/nss_wrapper + +NSS_WRAPPER_DIR="/tmp/nss_wrapper/${NSS_WRAPPER_SUBDIR}" +NSS_WRAPPER_PASSWD="${NSS_WRAPPER_DIR}/passwd" +NSS_WRAPPER_GROUP="${NSS_WRAPPER_DIR}/group" + +# create the nss_wrapper directory +mkdir -p "${NSS_WRAPPER_DIR}" + +# grab the current user ID and group ID +USER_ID=$(id -u) +export USER_ID +GROUP_ID=$(id -g) +export GROUP_ID + +# get copies of the passwd and group files +[[ -f "${NSS_WRAPPER_PASSWD}" ]] || cp "/etc/passwd" "${NSS_WRAPPER_PASSWD}" +[[ -f "${NSS_WRAPPER_GROUP}" ]] || cp "/etc/group" "${NSS_WRAPPER_GROUP}" + +# if the username is missing from the passwd file, then add it +if [[ ! 
$(cat "${NSS_WRAPPER_PASSWD}") =~ ${CRUNCHY_NSS_USERNAME}:x:${USER_ID} ]]; then + echo "nss_wrapper: adding user" + passwd_tmp="${NSS_WRAPPER_DIR}/passwd_tmp" + cp "${NSS_WRAPPER_PASSWD}" "${passwd_tmp}" + sed -i "/${CRUNCHY_NSS_USERNAME}:x:/d" "${passwd_tmp}" + # needed for OCP 4.x because crio updates /etc/passwd with an entry for USER_ID + sed -i "/${USER_ID}:x:/d" "${passwd_tmp}" + printf '${CRUNCHY_NSS_USERNAME}:x:${USER_ID}:${GROUP_ID}:${CRUNCHY_NSS_USER_DESC}:${HOME}:/bin/bash\n' >> "${passwd_tmp}" + envsubst < "${passwd_tmp}" > "${NSS_WRAPPER_PASSWD}" + rm "${passwd_tmp}" +else + echo "nss_wrapper: user exists" +fi + +# if the username (which will be the same as the group name) is missing from group file, then add it +if [[ ! $(cat "${NSS_WRAPPER_GROUP}") =~ ${CRUNCHY_NSS_USERNAME}:x:${USER_ID} ]]; then + echo "nss_wrapper: adding group" + group_tmp="${NSS_WRAPPER_DIR}/group_tmp" + cp "${NSS_WRAPPER_GROUP}" "${group_tmp}" + sed -i "/${CRUNCHY_NSS_USERNAME}:x:/d" "${group_tmp}" + printf '${CRUNCHY_NSS_USERNAME}:x:${USER_ID}:${CRUNCHY_NSS_USERNAME}\n' >> "${group_tmp}" + envsubst < "${group_tmp}" > "${NSS_WRAPPER_GROUP}" + rm "${group_tmp}" +else + echo "nss_wrapper: group exists" +fi + +# export the nss_wrapper env vars +# define nss_wrapper directory and passwd & group files that will be utilized by nss_wrapper +NSS_WRAPPER_DIR="/tmp/nss_wrapper/${NSS_WRAPPER_SUBDIR}" +NSS_WRAPPER_PASSWD="${NSS_WRAPPER_DIR}/passwd" +NSS_WRAPPER_GROUP="${NSS_WRAPPER_DIR}/group" + +export LD_PRELOAD=/usr/lib64/libnss_wrapper.so +export NSS_WRAPPER_PASSWD="${NSS_WRAPPER_PASSWD}" +export NSS_WRAPPER_GROUP="${NSS_WRAPPER_GROUP}" + +echo "nss_wrapper: environment configured" +` ) +// addDevSHM adds the shared memory "directory" to a Pod, which is needed by +// Postgres to allocate shared memory segments. This is a special directory +// called "/dev/shm", and is mounted as an emptyDir over a "memory" medium. This +// is mounted only to the database container. +func addDevSHM(template *corev1.PodTemplateSpec) { + + // do not set a size limit on shared memory. 
This will be handled by the OS + // layer + template.Spec.Volumes = append(template.Spec.Volumes, corev1.Volume{ + Name: "dshm", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }) + + // only give the database container access to shared memory + for i := range template.Spec.Containers { + if template.Spec.Containers[i].Name == naming.ContainerDatabase { + template.Spec.Containers[i].VolumeMounts = append(template.Spec.Containers[i].VolumeMounts, + corev1.VolumeMount{ + Name: "dshm", + MountPath: devSHMDir, + }) + } + } +} + // addTMPEmptyDir adds a "tmp" EmptyDir volume to the provided Pod template, while then also adding a // volume mount at /tmp for all containers defined within the Pod template // The '/tmp' directory is currently utilized for the following: -// * A temporary location for instance PGDATA volumes until real volumes are implemented -// * The location of the SSHD pid file -// * As the pgBackRest lock directory (this is the default lock location for pgBackRest) -// * The location where the replication client certificates can be loaded with the proper -// permissions set -func addTMPEmptyDir(template *v1.PodTemplateSpec) { - - template.Spec.Volumes = append(template.Spec.Volumes, v1.Volume{ +// - As the pgBackRest lock directory (this is the default lock location for pgBackRest) +// - The location where the replication client certificates can be loaded with the proper +// permissions set +func addTMPEmptyDir(template *corev1.PodTemplateSpec) { + + template.Spec.Volumes = append(template.Spec.Volumes, corev1.Volume{ Name: "tmp", - VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{ + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ SizeLimit: &tmpDirSizeLimit, }, }, @@ -61,7 +153,7 @@ func addTMPEmptyDir(template *v1.PodTemplateSpec) { for i := range template.Spec.Containers { template.Spec.Containers[i].VolumeMounts = append(template.Spec.Containers[i].VolumeMounts, - v1.VolumeMount{ + corev1.VolumeMount{ Name: "tmp", MountPath: "/tmp", }) @@ -69,7 +161,7 @@ func addTMPEmptyDir(template *v1.PodTemplateSpec) { for i := range template.Spec.InitContainers { template.Spec.InitContainers[i].VolumeMounts = append(template.Spec.InitContainers[i].VolumeMounts, - v1.VolumeMount{ + corev1.VolumeMount{ Name: "tmp", MountPath: "/tmp", }) @@ -80,15 +172,25 @@ func addTMPEmptyDir(template *v1.PodTemplateSpec) { // containers in the Pod template. Additionally, an init container is added to the Pod template // as needed to setup the nss_wrapper. Please note that the nss_wrapper is required for // compatibility with OpenShift: https://access.redhat.com/articles/4859371. 
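To make the intent of these pod-template helpers concrete, here is a minimal sketch (not part of this change) of how addTMPEmptyDir, addDevSHM, and the updated addNSSWrapper below could be combined. The helper name is hypothetical; it assumes the same postgrescluster package and the config.PostgresContainerImage call and ImagePullPolicy field used elsewhere in this change:

// buildInstanceTemplateSketch is illustrative only: it applies the
// pod-template helpers from util.go to a minimal template containing just the
// database container.
func buildInstanceTemplateSketch(cluster *v1beta1.PostgresCluster) *corev1.PodTemplateSpec {
	template := &corev1.PodTemplateSpec{Spec: corev1.PodSpec{
		Containers: []corev1.Container{{Name: naming.ContainerDatabase}},
	}}

	// /tmp for the pgBackRest lock directory and replication certificates,
	// /dev/shm for Postgres shared memory, then the nss_wrapper init container
	// and environment variables described above.
	addTMPEmptyDir(template)
	addDevSHM(template)
	addNSSWrapper(config.PostgresContainerImage(cluster), cluster.Spec.ImagePullPolicy, template)

	return template
}

The nss_wrapper init container added here then inherits the database container's resource settings, per the selection logic that follows.
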
-func addNSSWrapper(image string, template *v1.PodTemplateSpec) { +func addNSSWrapper(image string, imagePullPolicy corev1.PullPolicy, template *corev1.PodTemplateSpec) { + nssWrapperCmd := postgresNSSWrapperPrefix + nssWrapperScript for i, c := range template.Spec.Containers { switch c.Name { case naming.ContainerDatabase, naming.PGBackRestRepoContainerName, naming.PGBackRestRestoreContainerName: passwd := fmt.Sprintf(nssWrapperDir, "postgres", "passwd") group := fmt.Sprintf(nssWrapperDir, "postgres", "group") - template.Spec.Containers[i].Env = append(template.Spec.Containers[i].Env, []v1.EnvVar{ + template.Spec.Containers[i].Env = append(template.Spec.Containers[i].Env, []corev1.EnvVar{ + {Name: "LD_PRELOAD", Value: "/usr/lib64/libnss_wrapper.so"}, + {Name: "NSS_WRAPPER_PASSWD", Value: passwd}, + {Name: "NSS_WRAPPER_GROUP", Value: group}, + }...) + case naming.ContainerPGAdmin: + nssWrapperCmd = pgAdminNSSWrapperPrefix + nssWrapperScript + passwd := fmt.Sprintf(nssWrapperDir, "pgadmin", "passwd") + group := fmt.Sprintf(nssWrapperDir, "pgadmin", "group") + template.Spec.Containers[i].Env = append(template.Spec.Containers[i].Env, []corev1.EnvVar{ {Name: "LD_PRELOAD", Value: "/usr/lib64/libnss_wrapper.so"}, {Name: "NSS_WRAPPER_PASSWD", Value: passwd}, {Name: "NSS_WRAPPER_GROUP", Value: group}, @@ -96,13 +198,57 @@ func addNSSWrapper(image string, template *v1.PodTemplateSpec) { } } - template.Spec.InitContainers = append(template.Spec.InitContainers, - v1.Container{ - Command: []string{"bash", "-c", nssWrapperCmd}, - Image: image, - Name: naming.ContainerNSSWrapperInit, - SecurityContext: initialize.RestrictedSecurityContext(), - }) + container := corev1.Container{ + Command: []string{"bash", "-c", nssWrapperCmd}, + Image: image, + ImagePullPolicy: imagePullPolicy, + Name: naming.ContainerNSSWrapperInit, + SecurityContext: initialize.RestrictedSecurityContext(), + } + + // Here we set the NSS wrapper container resources to the 'database', 'pgadmin' + // or 'pgbackrest' container configuration, as appropriate. + + // First, we'll set the NSS wrapper container configuration for any pgAdmin + // containers because pgAdmin Pods won't contain any other containers + containsPGAdmin := false + for i, c := range template.Spec.Containers { + if c.Name == naming.ContainerPGAdmin { + containsPGAdmin = true + container.Resources = template.Spec.Containers[i].Resources + break + } + } + + // If this was a pgAdmin Pod, we don't need to check anything else. + if !containsPGAdmin { + // Because the instance Pod has both a 'database' and 'pgbackrest' container, + // we'll first check for the 'database' container and use those resource + // settings for any instance pods. + containsDatabase := false + for i, c := range template.Spec.Containers { + if c.Name == naming.ContainerDatabase { + containsDatabase = true + container.Resources = template.Spec.Containers[i].Resources + break + } + if c.Name == naming.PGBackRestRestoreContainerName { + container.Resources = template.Spec.Containers[i].Resources + break + } + } + // If 'database' is not found, we need to use the 'pgbackrest' resource + // configuration settings instead + if !containsDatabase { + for i, c := range template.Spec.Containers { + if c.Name == naming.PGBackRestRepoContainerName { + container.Resources = template.Spec.Containers[i].Resources + break + } + } + } + } + template.Spec.InitContainers = append(template.Spec.InitContainers, container) } // jobFailed returns "true" if the Job provided has failed. Otherwise it returns "false". 
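jobFailed and the companion jobCompleted below are the two predicates that the directory-move reconcilers in volumes.go gate on; the following standalone sketch (helper name hypothetical, not part of this change) spells out that decision:

// moveJobDisposition is an illustrative helper showing how jobCompleted and
// jobFailed are combined by the directory-move Jobs: a completed Job needs no
// further action, a still-running Job means the caller should requeue and
// wait, and a failed Job can simply be re-applied.
func moveJobDisposition(job *batchv1.Job) (done, requeue bool) {
	switch {
	case jobCompleted(job):
		return true, false
	case jobFailed(job):
		return false, false
	default:
		return false, true
	}
}
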
@@ -110,7 +256,7 @@ func jobFailed(job *batchv1.Job) bool { conditions := job.Status.Conditions for i := range conditions { if conditions[i].Type == batchv1.JobFailed { - return (conditions[i].Status == v1.ConditionTrue) + return (conditions[i].Status == corev1.ConditionTrue) } } return false @@ -122,7 +268,7 @@ func jobCompleted(job *batchv1.Job) bool { conditions := job.Status.Conditions for i := range conditions { if conditions[i].Type == batchv1.JobComplete { - return (conditions[i].Status == v1.ConditionTrue) + return (conditions[i].Status == corev1.ConditionTrue) } } return false @@ -139,22 +285,3 @@ func safeHash32(content func(w io.Writer) error) (string, error) { } return rand.SafeEncodeString(fmt.Sprint(hash.Sum32())), nil } - -// updateReconcileResult creates a new Result based on the new and existing results provided to it. -// This includes setting "Requeue" to true in the Result if set to true in the new Result but not -// in the existing Result, while also updating RequeueAfter if the RequeueAfter value for the new -// result is less the the RequeueAfter value for the existing Result. -func updateReconcileResult(currResult, newResult reconcile.Result) reconcile.Result { - - if newResult.Requeue { - currResult.Requeue = true - } - - if newResult.RequeueAfter != 0 { - if currResult.RequeueAfter == 0 || newResult.RequeueAfter < currResult.RequeueAfter { - currResult.RequeueAfter = newResult.RequeueAfter - } - } - - return currResult -} diff --git a/internal/controller/postgrescluster/util_test.go b/internal/controller/postgrescluster/util_test.go index e702b06452..51a32f1e85 100644 --- a/internal/controller/postgrescluster/util_test.go +++ b/internal/controller/postgrescluster/util_test.go @@ -1,33 +1,21 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "errors" - "fmt" "io" "testing" - "time" "gotest.tools/v3/assert" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/reconcile" + "k8s.io/apimachinery/pkg/api/resource" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" ) func TestSafeHash32(t *testing.T) { @@ -51,158 +39,83 @@ func TestSafeHash32(t *testing.T) { assert.Equal(t, same, stuff, "expected deterministic hash") } -func TestUpdateReconcileResult(t *testing.T) { +func TestAddDevSHM(t *testing.T) { testCases := []struct { - currResult reconcile.Result - newResult reconcile.Result - requeueExpected bool - expectedRequeueAfter time.Duration + tcName string + podTemplate *corev1.PodTemplateSpec + expected bool }{{ - currResult: reconcile.Result{}, - newResult: reconcile.Result{}, - requeueExpected: false, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{Requeue: false}, - newResult: reconcile.Result{Requeue: true}, - requeueExpected: true, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{Requeue: true}, - newResult: reconcile.Result{Requeue: false}, - requeueExpected: true, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{Requeue: true}, - newResult: reconcile.Result{Requeue: true}, - requeueExpected: true, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{Requeue: false}, - newResult: reconcile.Result{Requeue: false}, - requeueExpected: false, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{}, - newResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - requeueExpected: false, - expectedRequeueAfter: 5 * time.Second, - }, { - currResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - newResult: reconcile.Result{}, - requeueExpected: false, - expectedRequeueAfter: 5 * time.Second, - }, { - currResult: reconcile.Result{RequeueAfter: 1 * time.Second}, - newResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - requeueExpected: false, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - newResult: reconcile.Result{RequeueAfter: 1 * time.Second}, - requeueExpected: false, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - newResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - requeueExpected: false, - expectedRequeueAfter: 5 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: true, RequeueAfter: 1 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: true, RequeueAfter: 1 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: false, RequeueAfter: 1 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: true, RequeueAfter: 1 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: false, RequeueAfter: 5 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - 
Requeue: false, RequeueAfter: 5 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: false, RequeueAfter: 1 * time.Second, - }, - requeueExpected: false, - expectedRequeueAfter: 1 * time.Second, + tcName: "database and pgbackrest containers", + podTemplate: &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "database"}, {Name: "pgbackrest"}, {Name: "dontmodify"}, + }}}, + expected: true, }, { - currResult: reconcile.Result{ - Requeue: false, RequeueAfter: 1 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: false, RequeueAfter: 5 * time.Second, - }, - requeueExpected: false, - expectedRequeueAfter: 1 * time.Second, + tcName: "database container only", + podTemplate: &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "database"}, {Name: "dontmodify"}}}}, + expected: true, }, { - currResult: reconcile.Result{}, - newResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 5 * time.Second, + tcName: "pgbackest container only", + podTemplate: &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "dontmodify"}, {Name: "pgbackrest"}}}}, }, { - currResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - newResult: reconcile.Result{}, - requeueExpected: true, - expectedRequeueAfter: 5 * time.Second, + tcName: "other containers", + podTemplate: &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "dontmodify1"}, {Name: "dontmodify2"}}}}, }} for _, tc := range testCases { - t.Run(fmt.Sprintf("curr: %v, new: %v", tc.currResult, tc.newResult), func(t *testing.T) { - result := updateReconcileResult(tc.currResult, tc.newResult) - assert.Assert(t, result.Requeue == tc.requeueExpected) - assert.Assert(t, result.RequeueAfter == tc.expectedRequeueAfter) + t.Run(tc.tcName, func(t *testing.T) { + + template := tc.podTemplate + + addDevSHM(template) + + found := false + + // check there is an empty dir mounted under the dshm volume + for _, v := range template.Spec.Volumes { + if v.Name == "dshm" && v.VolumeSource.EmptyDir != nil && v.VolumeSource.EmptyDir.Medium == corev1.StorageMediumMemory { + found = true + break + } + } + assert.Assert(t, found) + + // check that the database container contains a mount to the shared volume + // directory + found = false + + loop: + for _, c := range template.Spec.Containers { + if c.Name == naming.ContainerDatabase { + for _, vm := range c.VolumeMounts { + if vm.Name == "dshm" && vm.MountPath == "/dev/shm" { + found = true + break loop + } + } + } + } + + assert.Equal(t, tc.expected, found) }) } } func TestAddNSSWrapper(t *testing.T) { - databaseBackrestContainerCount := func(template *corev1.PodTemplateSpec) int { - var count int - for _, c := range template.Spec.Containers { - switch c.Name { - case naming.ContainerDatabase: - count++ - case naming.PGBackRestRepoContainerName: - count++ - } - } - return count - } - image := "test-image" + imagePullPolicy := corev1.PullAlways + + expectedResources := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + }} expectedEnv := []corev1.EnvVar{ {Name: "LD_PRELOAD", Value: "/usr/lib64/libnss_wrapper.so"}, @@ -210,70 +123,151 @@ func TestAddNSSWrapper(t *testing.T) { {Name: "NSS_WRAPPER_GROUP", Value: "/tmp/nss_wrapper/postgres/group"}, } - expectedCmd := `NSS_WRAPPER_SUBDIR=postgres CRUNCHY_NSS_USERNAME=postgres ` + - 
`CRUNCHY_NSS_USER_DESC="postgres" /opt/crunchy/bin/nss_wrapper.sh` + expectedPGAdminEnv := []corev1.EnvVar{ + {Name: "LD_PRELOAD", Value: "/usr/lib64/libnss_wrapper.so"}, + {Name: "NSS_WRAPPER_PASSWD", Value: "/tmp/nss_wrapper/pgadmin/passwd"}, + {Name: "NSS_WRAPPER_GROUP", Value: "/tmp/nss_wrapper/pgadmin/group"}, + } testCases := []struct { - tcName string - podTemplate *corev1.PodTemplateSpec + tcName string + podTemplate *corev1.PodTemplateSpec + pgadmin bool + resourceProvider string + expectedUpdatedContainerCount int }{{ - tcName: "database and pgbackrest containers", + tcName: "database container with pgbackrest sidecar", podTemplate: &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ Containers: []corev1.Container{ - {Name: "database"}, {Name: "pgbackrest"}, {Name: "dontmodify"}, + {Name: naming.ContainerDatabase, Resources: expectedResources}, + {Name: naming.PGBackRestRepoContainerName, Resources: expectedResources}, + {Name: "dontmodify"}, }}}, + expectedUpdatedContainerCount: 2, }, { tcName: "database container only", podTemplate: &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ - Containers: []corev1.Container{{Name: "database"}, {Name: "dontmodify"}}}}, + Containers: []corev1.Container{ + {Name: naming.ContainerDatabase, Resources: expectedResources}, + {Name: "dontmodify"}}}}, + expectedUpdatedContainerCount: 1, }, { tcName: "pgbackest container only", podTemplate: &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ - Containers: []corev1.Container{{Name: "dontmodify"}, {Name: "pgbackrest"}}}}, + Containers: []corev1.Container{ + {Name: naming.PGBackRestRepoContainerName, Resources: expectedResources}, + {Name: "dontmodify"}, + }}}, + expectedUpdatedContainerCount: 1, }, { - tcName: "other containers", + tcName: "pgadmin container only", + podTemplate: &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "dontmodify"}, {Name: "pgadmin"}}}}, + pgadmin: true, + expectedUpdatedContainerCount: 1, + }, { + tcName: "restore container only", podTemplate: &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ Containers: []corev1.Container{ - {Name: "dontmodify1"}, {Name: "dontmodify2"}}}}, + {Name: naming.PGBackRestRestoreContainerName, Resources: expectedResources}, + {Name: "dontmodify"}, + }}}, + expectedUpdatedContainerCount: 1, + }, { + tcName: "custom database container resources", + podTemplate: &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "database", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + }}}}}}, + resourceProvider: "database", + expectedUpdatedContainerCount: 1, + }, { + tcName: "custom pgbackrest container resources", + podTemplate: &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "pgbackrest", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("300m"), + }}}}}}, + resourceProvider: "pgbackrest", + expectedUpdatedContainerCount: 1, + }, { + tcName: "custom pgadmin container resources", + podTemplate: &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "pgadmin", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("400m"), + }}}}}}, + pgadmin: true, + resourceProvider: "pgadmin", + expectedUpdatedContainerCount: 1, }} for _, tc := range testCases { t.Run(tc.tcName, func(t *testing.T) { template := tc.podTemplate - - beforeAddNSS 
:= template.Spec.Containers - - addNSSWrapper(image, template) - - // verify proper nss_wrapper env vars - var expectedContainerUpdateCount int - for i, c := range template.Spec.Containers { - if c.Name == "database" || c.Name == "pgbackrest" { - assert.DeepEqual(t, expectedEnv, c.Env) - expectedContainerUpdateCount++ - } else { - assert.DeepEqual(t, beforeAddNSS[i], c) + beforeAddNSS := template.DeepCopy().Spec.Containers + + addNSSWrapper(image, imagePullPolicy, template) + + t.Run("container-updated", func(t *testing.T) { + // Each container that requires the nss_wrapper envs should be updated + var actualUpdatedContainerCount int + for i, c := range template.Spec.Containers { + if c.Name == naming.ContainerDatabase || + c.Name == naming.PGBackRestRepoContainerName || + c.Name == naming.PGBackRestRestoreContainerName { + assert.DeepEqual(t, expectedEnv, c.Env) + actualUpdatedContainerCount++ + } else if c.Name == "pgadmin" { + assert.DeepEqual(t, expectedPGAdminEnv, c.Env) + actualUpdatedContainerCount++ + } else { + assert.DeepEqual(t, beforeAddNSS[i], c) + } } - } - - // verify database and/or pgbackrest containers updated - assert.Equal(t, expectedContainerUpdateCount, - databaseBackrestContainerCount(template)) - - var foundInitContainer bool - // verify init container command, image & name - for _, c := range template.Spec.InitContainers { - if c.Name == naming.ContainerNSSWrapperInit { - assert.Equal(t, expectedCmd, c.Command[2]) // ignore "bash -c" - assert.Assert(t, c.Image == image) - assert.Assert(t, c.SecurityContext != &corev1.SecurityContext{}) - foundInitContainer = true - break + // verify database and/or pgbackrest containers updated + assert.Equal(t, actualUpdatedContainerCount, + tc.expectedUpdatedContainerCount) + }) + + t.Run("init-container-added", func(t *testing.T) { + var foundInitContainer bool + // verify init container command, image & name + for _, ic := range template.Spec.InitContainers { + if ic.Name == naming.ContainerNSSWrapperInit { + if tc.pgadmin { + assert.Equal(t, pgAdminNSSWrapperPrefix+nssWrapperScript, ic.Command[2]) // ignore "bash -c" + } else { + assert.Equal(t, postgresNSSWrapperPrefix+nssWrapperScript, ic.Command[2]) // ignore "bash -c" + } + assert.Assert(t, ic.Image == image) + assert.Assert(t, ic.ImagePullPolicy == imagePullPolicy) + assert.Assert(t, !cmp.DeepEqual(ic.SecurityContext, + &corev1.SecurityContext{})().Success()) + + if tc.resourceProvider != "" { + for _, c := range template.Spec.Containers { + if c.Name == tc.resourceProvider { + assert.DeepEqual(t, ic.Resources.Requests, + c.Resources.Requests) + } + } + } + foundInitContainer = true + break + } } - } - // verify init container is present - assert.Assert(t, foundInitContainer) + // verify init container is present + assert.Assert(t, foundInitContainer) + }) }) } } diff --git a/internal/controller/postgrescluster/volumes.go b/internal/controller/postgrescluster/volumes.go index 40cbbb3c08..e40710d4ff 100644 --- a/internal/controller/postgrescluster/volumes.go +++ b/internal/controller/postgrescluster/volumes.go @@ -1,24 +1,16 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" + "fmt" + "strconv" "github.com/pkg/errors" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -26,12 +18,17 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/pgbackrest" + "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=list +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={list} // observePersistentVolumeClaims reads all PVCs for cluster from the Kubernetes // API and sets the PersistentVolumeResizing condition as appropriate. @@ -67,7 +64,26 @@ func (r *Reconciler) observePersistentVolumeClaims( for _, condition := range pvc.Status.Conditions { switch condition.Type { case + // When the resize controller sees `spec.resources != status.capacity`, + // it sets a "Resizing" condition and invokes the storage provider. + // NOTE: The oldest KEP talks about "ResizeStarted", but that + // changed to "Resizing" during the merge to Kubernetes v1.8. + // - https://git.k8s.io/enhancements/keps/sig-storage/284-enable-volume-expansion + // - https://pr.k8s.io/49727#discussion_r136678508 corev1.PersistentVolumeClaimResizing, + + // Kubernetes v1.10 added the "FileSystemResizePending" condition + // to indicate when the storage provider has finished its work. + // When a CSI implementation indicates that it performed the + // *entire* resize, this condition does not appear. + // - https://git.k8s.io/enhancements/keps/sig-storage/556-csi-volume-resizing + // - https://pr.k8s.io/58415 + // + // Kubernetes v1.15 ("ExpandInUsePersistentVolumes" feature gate) + // finishes the resize of mounted and writable PVCs that have + // the "FileSystemResizePending" condition. When the work is done, + // the condition is removed and `spec.resources == status.capacity`. + // - https://git.k8s.io/enhancements/keps/sig-storage/531-online-pv-resizing corev1.PersistentVolumeClaimFileSystemResizePending: // Initialize from the first condition. @@ -103,6 +119,15 @@ func (r *Reconciler) observePersistentVolumeClaims( resizing.LastTransitionTime = minNotZero( resizing.LastTransitionTime, condition.LastTransitionTime) } + + case + // The "ModifyingVolume" and "ModifyVolumeError" conditions occur + // when the attribute class of a PVC is changing. These attributes + // do not affect the size of a volume, so there's nothing to do. + // See the "VolumeAttributesClass" feature gate. 
+ // - https://git.k8s.io/enhancements/keps/sig-storage/3751-volume-attributes-class + corev1.PersistentVolumeClaimVolumeModifyingVolume, + corev1.PersistentVolumeClaimVolumeModifyVolumeError: } } } @@ -110,18 +135,627 @@ func (r *Reconciler) observePersistentVolumeClaims( if resizing.Status != "" { meta.SetStatusCondition(&cluster.Status.Conditions, resizing) } else { - // Avoid a panic! Fixed in Kubernetes v1.21.0 and controller-runtime v0.9.0-alpha.0. - // - https://issue.k8s.io/99714 - if len(cluster.Status.Conditions) > 0 { - // NOTE(cbandy): This clears the condition, but it may immediately - // return with a new LastTransitionTime when a PVC spec is invalid. - meta.RemoveStatusCondition(&cluster.Status.Conditions, resizing.Type) - } + // NOTE(cbandy): This clears the condition, but it may immediately + // return with a new LastTransitionTime when a PVC spec is invalid. + meta.RemoveStatusCondition(&cluster.Status.Conditions, resizing.Type) } return volumes.Items, err } +// configureExistingPVCs configures the defined pgData, pg_wal and pgBackRest +// repo volumes to be used by the PostgresCluster. In the case of existing +// pgData volumes, an appropriate instance set name is defined that will be +// used for the PostgresCluster. Existing pg_wal volumes MUST be defined along +// with existing pgData volumes to ensure consistent naming and proper +// bootstrapping. +func (r *Reconciler) configureExistingPVCs( + ctx context.Context, cluster *v1beta1.PostgresCluster, + volumes []corev1.PersistentVolumeClaim, +) ([]corev1.PersistentVolumeClaim, error) { + + var err error + + if cluster.Spec.DataSource != nil && + cluster.Spec.DataSource.Volumes != nil && + cluster.Spec.DataSource.Volumes.PGDataVolume != nil { + // If the startup instance name isn't set, use the instance set defined at position zero. + if cluster.Status.StartupInstance == "" { + set := &cluster.Spec.InstanceSets[0] + cluster.Status.StartupInstanceSet = set.Name + cluster.Status.StartupInstance = naming.GenerateStartupInstance(cluster, set).Name + } + volumes, err = r.configureExistingPGVolumes(ctx, cluster, volumes, + cluster.Status.StartupInstance) + + // existing WAL volume must be paired with an existing pgData volume + if cluster.Spec.DataSource != nil && + cluster.Spec.DataSource.Volumes != nil && + cluster.Spec.DataSource.Volumes.PGWALVolume != nil && + err == nil { + volumes, err = r.configureExistingPGWALVolume(ctx, cluster, volumes, + cluster.Status.StartupInstance) + } + } + + if cluster.Spec.DataSource != nil && + cluster.Spec.DataSource.Volumes != nil && + cluster.Spec.DataSource.Volumes.PGBackRestVolume != nil && + err == nil { + + volumes, err = r.configureExistingRepoVolumes(ctx, cluster, volumes) + } + return volumes, err +} + +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,patch} + +// configureExistingPGVolumes first searches the observed volumes list to see +// if the existing pgData volume defined in the spec is already updated. If not, +// this sets the appropriate labels and ownership for the volume to be used in +// the PostgresCluster. +func (r *Reconciler) configureExistingPGVolumes( + ctx context.Context, + cluster *v1beta1.PostgresCluster, + volumes []corev1.PersistentVolumeClaim, + instanceName string, +) ([]corev1.PersistentVolumeClaim, error) { + + // if the volume is already in the list, move on + for i := range volumes { + if cluster.Spec.DataSource.Volumes.PGDataVolume. 
+ PVCName == volumes[i].Name { + return volumes, nil + } + } + + if len(cluster.Spec.InstanceSets) > 0 { + if volName := cluster.Spec.DataSource.Volumes. + PGDataVolume.PVCName; volName != "" { + volume := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: volName, + Namespace: cluster.Namespace, + }, + Spec: cluster.Spec.InstanceSets[0].DataVolumeClaimSpec, + } + + volume.ObjectMeta.Labels = map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelInstanceSet: cluster.Spec.InstanceSets[0].Name, + naming.LabelInstance: instanceName, + naming.LabelRole: naming.RolePostgresData, + naming.LabelData: naming.DataPostgres, + } + volume.SetGroupVersionKind(corev1.SchemeGroupVersion. + WithKind("PersistentVolumeClaim")) + if err := r.setControllerReference(cluster, volume); err != nil { + return volumes, err + } + if err := errors.WithStack(r.apply(ctx, volume)); err != nil { + return volumes, err + } + volumes = append(volumes, *volume) + } + } + return volumes, nil +} + +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,patch} + +// configureExistingPGWALVolume first searches the observed volumes list to see +// if the existing pg_wal volume defined in the spec is already updated. If not, +// this sets the appropriate labels and ownership for the volume to be used in +// the PostgresCluster. +func (r *Reconciler) configureExistingPGWALVolume( + ctx context.Context, + cluster *v1beta1.PostgresCluster, + volumes []corev1.PersistentVolumeClaim, + instanceName string, +) ([]corev1.PersistentVolumeClaim, error) { + + // if the volume is already in the list, move on + for i := range volumes { + if cluster.Spec.DataSource.Volumes.PGWALVolume. + PVCName == volumes[i].Name { + return volumes, nil + } + } + + if volName := cluster.Spec.DataSource.Volumes.PGWALVolume. + PVCName; volName != "" { + + volume := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: volName, + Namespace: cluster.Namespace, + }, + Spec: cluster.Spec.InstanceSets[0].DataVolumeClaimSpec, + } + + volume.ObjectMeta.Labels = map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelInstanceSet: cluster.Spec.InstanceSets[0].Name, + naming.LabelInstance: instanceName, + naming.LabelRole: naming.RolePostgresWAL, + naming.LabelData: naming.DataPostgres, + } + volume.SetGroupVersionKind(corev1.SchemeGroupVersion. + WithKind("PersistentVolumeClaim")) + if err := r.setControllerReference(cluster, volume); err != nil { + return volumes, err + } + if err := errors.WithStack(r.apply(ctx, volume)); err != nil { + return volumes, err + } + volumes = append(volumes, *volume) + } + return volumes, nil +} + +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,patch} + +// configureExistingRepoVolumes first searches the observed volumes list to see +// if the existing pgBackRest repo volume defined in the spec is already updated. +// If not, this sets the appropriate labels and ownership for the volume to be +// used in the PostgresCluster. +func (r *Reconciler) configureExistingRepoVolumes( + ctx context.Context, + cluster *v1beta1.PostgresCluster, + volumes []corev1.PersistentVolumeClaim, +) ([]corev1.PersistentVolumeClaim, error) { + + // if the volume is already in the list, move on + for i := range volumes { + if cluster.Spec.DataSource.Volumes.PGBackRestVolume. 
+ PVCName == volumes[i].Name { + return volumes, nil + } + } + + if len(cluster.Spec.Backups.PGBackRest.Repos) > 0 { + // there must be at least on pgBackrest repo defined + if volName := cluster.Spec.DataSource.Volumes. + PGBackRestVolume.PVCName; volName != "" { + volume := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: volName, + Namespace: cluster.Namespace, + Labels: naming.PGBackRestRepoVolumeLabels(cluster.Name, + cluster.Spec.Backups.PGBackRest.Repos[0].Name), + }, + Spec: cluster.Spec.Backups.PGBackRest.Repos[0].Volume. + VolumeClaimSpec, + } + + //volume.ObjectMeta = naming.PGBackRestRepoVolume(cluster, cluster.Spec.Backups.PGBackRest.Repos[0].Name) + volume.SetGroupVersionKind(corev1.SchemeGroupVersion. + WithKind("PersistentVolumeClaim")) + if err := r.setControllerReference(cluster, volume); err != nil { + return volumes, err + } + if err := errors.WithStack(r.apply(ctx, volume)); err != nil { + return volumes, err + } + volumes = append(volumes, *volume) + } + } + return volumes, nil +} + +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={list} + +// reconcileDirMoveJobs creates the existing volume move Jobs as defined in +// the PostgresCluster spec. A boolean value is return to indicate whether +// the main control loop should return early. +func (r *Reconciler) reconcileDirMoveJobs(ctx context.Context, + cluster *v1beta1.PostgresCluster) (bool, error) { + + if cluster.Spec.DataSource != nil && + cluster.Spec.DataSource.Volumes != nil { + + moveJobs := &batchv1.JobList{} + if err := r.Client.List(ctx, moveJobs, &client.ListOptions{ + Namespace: cluster.Namespace, + LabelSelector: naming.DirectoryMoveJobLabels(cluster.Name).AsSelector(), + }); err != nil { + return false, errors.WithStack(err) + } + + var err error + var pgDataReturn, pgWALReturn, repoReturn bool + + if cluster.Spec.DataSource.Volumes.PGDataVolume != nil && + cluster.Spec.DataSource.Volumes.PGDataVolume. + Directory != "" && + cluster.Spec.DataSource.Volumes.PGDataVolume. + PVCName != "" { + pgDataReturn, err = r.reconcileMovePGDataDir(ctx, cluster, moveJobs) + } + + if err == nil && + cluster.Spec.DataSource.Volumes.PGWALVolume != nil && + cluster.Spec.DataSource.Volumes.PGWALVolume. + Directory != "" && + cluster.Spec.DataSource.Volumes.PGWALVolume. + PVCName != "" { + pgWALReturn, err = r.reconcileMoveWALDir(ctx, cluster, moveJobs) + } + + if err == nil && + cluster.Spec.DataSource.Volumes.PGBackRestVolume != nil && + cluster.Spec.DataSource.Volumes.PGBackRestVolume. + Directory != "" && + cluster.Spec.DataSource.Volumes.PGBackRestVolume. + PVCName != "" { + repoReturn, err = r.reconcileMoveRepoDir(ctx, cluster, moveJobs) + } + // if any of the 'return early' values are true, return true + return pgDataReturn || pgWALReturn || repoReturn, err + } + + return false, nil +} + +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={create,patch,delete} + +// reconcileMovePGDataDir creates a Job to move the provided pgData directory +// in the given volume to the expected location before the PostgresCluster is +// bootstrapped. It returns any errors and a boolean indicating whether the +// main control loop should continue or return early to allow time for the job +// to complete. 
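Each of these reconcilers reports that boolean up through reconcileDirMoveJobs. As a rough sketch of how a caller might honor the "return early" signal — the method name, the 30-second interval, and the reconcile and time imports are assumptions rather than code from this change:

// handleDirMoveJobsSketch is illustrative only: one way a caller could give
// the move Jobs time to finish before the rest of cluster reconciliation runs.
func (r *Reconciler) handleDirMoveJobsSketch(
	ctx context.Context, cluster *v1beta1.PostgresCluster,
) (*reconcile.Result, error) {
	returnEarly, err := r.reconcileDirMoveJobs(ctx, cluster)
	if err != nil {
		return nil, err
	}
	if returnEarly {
		// Check back shortly; a move Job is still running.
		return &reconcile.Result{RequeueAfter: 30 * time.Second}, nil
	}
	return nil, nil // continue with normal reconciliation
}
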
+func (r *Reconciler) reconcileMovePGDataDir(ctx context.Context, + cluster *v1beta1.PostgresCluster, moveJobs *batchv1.JobList) (bool, error) { + + moveDirJob := &batchv1.Job{} + moveDirJob.ObjectMeta = naming.MovePGDataDirJob(cluster) + + // check for an existing Job + for i := range moveJobs.Items { + if moveJobs.Items[i].Name == moveDirJob.Name { + if jobCompleted(&moveJobs.Items[i]) { + // if the Job is completed, return as this only needs to run once + return false, nil + } + if !jobFailed(&moveJobs.Items[i]) { + // if the Job otherwise exists and has not failed, return and + // give the Job time to finish + return true, nil + } + } + } + + // at this point, the Job either wasn't found or it has failed, so the it + // should be created + moveDirJob.ObjectMeta.Annotations = naming.Merge(cluster.Spec.Metadata. + GetAnnotationsOrNil()) + labels := naming.Merge(cluster.Spec.Metadata.GetLabelsOrNil(), + naming.DirectoryMoveJobLabels(cluster.Name), + map[string]string{ + naming.LabelMovePGDataDir: "", + }) + moveDirJob.ObjectMeta.Labels = labels + + // `patroni.dynamic.json` holds the previous state of the DCS. Since we are + // migrating the volumes, we want to clear out any obsolete configuration info. + script := fmt.Sprintf(`echo "Preparing cluster %s volumes for PGO v5.x" + echo "pgdata_pvc=%s" + echo "Current PG data directory volume contents:" + ls -lh "/pgdata" + echo "Now updating PG data directory..." + [ -d "/pgdata/%s" ] && mv "/pgdata/%s" "/pgdata/pg%s_bootstrap" + rm -f "/pgdata/pg%s/patroni.dynamic.json" + echo "Updated PG data directory contents:" + ls -lh "/pgdata" + echo "PG Data directory preparation complete" + `, cluster.Name, + cluster.Spec.DataSource.Volumes.PGDataVolume.PVCName, + cluster.Spec.DataSource.Volumes.PGDataVolume.Directory, + cluster.Spec.DataSource.Volumes.PGDataVolume.Directory, + strconv.Itoa(cluster.Spec.PostgresVersion), + strconv.Itoa(cluster.Spec.PostgresVersion)) + + container := corev1.Container{ + Command: []string{"bash", "-ceu", script}, + Image: config.PostgresContainerImage(cluster), + ImagePullPolicy: cluster.Spec.ImagePullPolicy, + Name: naming.ContainerJobMovePGDataDir, + SecurityContext: initialize.RestrictedSecurityContext(), + VolumeMounts: []corev1.VolumeMount{postgres.DataVolumeMount()}, + } + if len(cluster.Spec.InstanceSets) > 0 { + container.Resources = cluster.Spec.InstanceSets[0].Resources + } + + jobSpec := &batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: labels}, + Spec: corev1.PodSpec{ + // Set the image pull secrets, if any exist. + // This is set here rather than using the service account due to the lack + // of propagation to existing pods when the CRD is updated: + // https://github.com/kubernetes/kubernetes/issues/88456 + ImagePullSecrets: cluster.Spec.ImagePullSecrets, + Containers: []corev1.Container{container}, + SecurityContext: postgres.PodSecurityContext(cluster), + // Set RestartPolicy to "Never" since we want a new Pod to be + // created by the Job controller when there is a failure + // (instead of the container simply restarting). + RestartPolicy: corev1.RestartPolicyNever, + // These Jobs don't make Kubernetes API calls, so we can just + // use the default ServiceAccount and not mount its credentials. 
+ AutomountServiceAccountToken: initialize.Bool(false), + EnableServiceLinks: initialize.Bool(false), + Volumes: []corev1.Volume{{ + Name: "postgres-data", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: cluster.Spec.DataSource.Volumes. + PGDataVolume.PVCName, + }, + }}, + }, + }, + }, + } + // set the priority class name, if it exists + if len(cluster.Spec.InstanceSets) > 0 { + jobSpec.Template.Spec.PriorityClassName = + initialize.FromPointer(cluster.Spec.InstanceSets[0].PriorityClassName) + } + moveDirJob.Spec = *jobSpec + + // set gvk and ownership refs + moveDirJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) + if err := controllerutil.SetControllerReference(cluster, moveDirJob, + r.Client.Scheme()); err != nil { + return true, errors.WithStack(err) + } + + // server-side apply the backup Job intent + if err := r.apply(ctx, moveDirJob); err != nil { + return true, errors.WithStack(err) + } + + return true, nil +} + +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={create,patch,delete} + +// reconcileMoveWalDir creates a Job to move the provided pg_wal directory +// in the given volume to the expected location before the PostgresCluster is +// bootstrapped. It returns any errors and a boolean indicating whether the +// main control loop should continue or return early to allow time for the job +// to complete. +func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, + cluster *v1beta1.PostgresCluster, moveJobs *batchv1.JobList) (bool, error) { + + moveDirJob := &batchv1.Job{} + moveDirJob.ObjectMeta = naming.MovePGWALDirJob(cluster) + + // check for an existing Job + for i := range moveJobs.Items { + if moveJobs.Items[i].Name == moveDirJob.Name { + if jobCompleted(&moveJobs.Items[i]) { + // if the Job is completed, return as this only needs to run once + return false, nil + } + if !jobFailed(&moveJobs.Items[i]) { + // if the Job otherwise exists and has not failed, return and + // give the Job time to finish + return true, nil + } + } + } + + moveDirJob.ObjectMeta.Annotations = naming.Merge(cluster.Spec.Metadata. + GetAnnotationsOrNil()) + labels := naming.Merge(cluster.Spec.Metadata.GetLabelsOrNil(), + naming.DirectoryMoveJobLabels(cluster.Name), + map[string]string{ + naming.LabelMovePGWalDir: "", + }) + moveDirJob.ObjectMeta.Labels = labels + + script := fmt.Sprintf(`echo "Preparing cluster %s volumes for PGO v5.x" + echo "pg_wal_pvc=%s" + echo "Current PG WAL directory volume contents:" + ls -lh "/pgwal" + echo "Now updating PG WAL directory..." 
+    [ -d "/pgwal/%s" ] && mv "/pgwal/%s" "/pgwal/%s-wal"
+    echo "Updated PG WAL directory contents:"
+    ls -lh "/pgwal"
+    echo "PG WAL directory preparation complete"
+    `, cluster.Name,
+        cluster.Spec.DataSource.Volumes.PGWALVolume.PVCName,
+        cluster.Spec.DataSource.Volumes.PGWALVolume.Directory,
+        cluster.Spec.DataSource.Volumes.PGWALVolume.Directory,
+        cluster.ObjectMeta.Name)
+
+    container := corev1.Container{
+        Command:         []string{"bash", "-ceu", script},
+        Image:           config.PostgresContainerImage(cluster),
+        ImagePullPolicy: cluster.Spec.ImagePullPolicy,
+        Name:            naming.ContainerJobMovePGWALDir,
+        SecurityContext: initialize.RestrictedSecurityContext(),
+        VolumeMounts:    []corev1.VolumeMount{postgres.WALVolumeMount()},
+    }
+    if len(cluster.Spec.InstanceSets) > 0 {
+        container.Resources = cluster.Spec.InstanceSets[0].Resources
+    }
+
+    jobSpec := &batchv1.JobSpec{
+        Template: corev1.PodTemplateSpec{
+            ObjectMeta: metav1.ObjectMeta{Labels: labels},
+            Spec: corev1.PodSpec{
+                // Set the image pull secrets, if any exist.
+                // This is set here rather than using the service account due to the lack
+                // of propagation to existing pods when the CRD is updated:
+                // https://github.com/kubernetes/kubernetes/issues/88456
+                ImagePullSecrets: cluster.Spec.ImagePullSecrets,
+                Containers:       []corev1.Container{container},
+                SecurityContext:  postgres.PodSecurityContext(cluster),
+                // Set RestartPolicy to "Never" since we want a new Pod to be
+                // created by the Job controller when there is a failure
+                // (instead of the container simply restarting).
+                RestartPolicy: corev1.RestartPolicyNever,
+                // These Jobs don't make Kubernetes API calls, so we can just
+                // use the default ServiceAccount and not mount its credentials.
+                AutomountServiceAccountToken: initialize.Bool(false),
+                EnableServiceLinks:           initialize.Bool(false),
+                Volumes: []corev1.Volume{{
+                    Name: "postgres-wal",
+                    VolumeSource: corev1.VolumeSource{
+                        PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+                            ClaimName: cluster.Spec.DataSource.Volumes.
+                                PGWALVolume.PVCName,
+                        },
+                    }},
+                },
+            },
+        },
+    }
+    // set the priority class name, if it exists
+    if len(cluster.Spec.InstanceSets) > 0 {
+        jobSpec.Template.Spec.PriorityClassName =
+            initialize.FromPointer(cluster.Spec.InstanceSets[0].PriorityClassName)
+    }
+    moveDirJob.Spec = *jobSpec
+
+    // set gvk and ownership refs
+    moveDirJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job"))
+    if err := controllerutil.SetControllerReference(cluster, moveDirJob,
+        r.Client.Scheme()); err != nil {
+        return true, errors.WithStack(err)
+    }
+
+    // server-side apply the directory move Job intent
+    if err := r.apply(ctx, moveDirJob); err != nil {
+        return true, errors.WithStack(err)
+    }
+
+    return true, nil
+}
+
+// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={create,patch,delete}
+
+// reconcileMoveRepoDir creates a Job to move the provided pgBackRest repo
+// directory in the given volume to the expected location before the
+// PostgresCluster is bootstrapped. It returns any errors and a boolean
+// indicating whether the main control loop should continue or return early
+// to allow time for the job to complete.
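+// The Job's script moves the repo's "archive" and "backup" subdirectories to
+// the top level of the mounted /pgbackrest volume.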
+func (r *Reconciler) reconcileMoveRepoDir(ctx context.Context,
+    cluster *v1beta1.PostgresCluster, moveJobs *batchv1.JobList) (bool, error) {
+
+    moveDirJob := &batchv1.Job{}
+    moveDirJob.ObjectMeta = naming.MovePGBackRestRepoDirJob(cluster)
+
+    // check for an existing Job
+    for i := range moveJobs.Items {
+        if moveJobs.Items[i].Name == moveDirJob.Name {
+            if jobCompleted(&moveJobs.Items[i]) {
+                // if the Job is completed, return as this only needs to run once
+                return false, nil
+            }
+            if !jobFailed(&moveJobs.Items[i]) {
+                // if the Job otherwise exists and has not failed, return and
+                // give the Job time to finish
+                return true, nil
+            }
+        }
+    }
+
+    moveDirJob.ObjectMeta.Annotations = naming.Merge(
+        cluster.Spec.Metadata.GetAnnotationsOrNil())
+    labels := naming.Merge(cluster.Spec.Metadata.GetLabelsOrNil(),
+        naming.DirectoryMoveJobLabels(cluster.Name),
+        map[string]string{
+            naming.LabelMovePGBackRestRepoDir: "",
+        })
+    moveDirJob.ObjectMeta.Labels = labels
+
+    script := fmt.Sprintf(`echo "Preparing cluster %s pgBackRest repo volume for PGO v5.x"
+    echo "repo_pvc=%s"
+    echo "pgbackrest directory:"
+    ls -lh /pgbackrest
+    echo "Current pgBackRest repo directory volume contents:" 
+    ls -lh "/pgbackrest/%s"
+    echo "Now updating repo directory..."
+    [ -d "/pgbackrest/%s" ] && mv -t "/pgbackrest/" "/pgbackrest/%s/archive"
+    [ -d "/pgbackrest/%s" ] && mv -t "/pgbackrest/" "/pgbackrest/%s/backup"
+    echo "Updated /pgbackrest directory contents:"
+    ls -lh "/pgbackrest"
+    echo "Repo directory preparation complete"
+    `, cluster.Name,
+        cluster.Spec.DataSource.Volumes.PGBackRestVolume.PVCName,
+        cluster.Spec.DataSource.Volumes.PGBackRestVolume.Directory,
+        cluster.Spec.DataSource.Volumes.PGBackRestVolume.Directory,
+        cluster.Spec.DataSource.Volumes.PGBackRestVolume.Directory,
+        cluster.Spec.DataSource.Volumes.PGBackRestVolume.Directory,
+        cluster.Spec.DataSource.Volumes.PGBackRestVolume.Directory)
+
+    container := corev1.Container{
+        Command:         []string{"bash", "-ceu", script},
+        Image:           config.PGBackRestContainerImage(cluster),
+        ImagePullPolicy: cluster.Spec.ImagePullPolicy,
+        Name:            naming.ContainerJobMovePGBackRestRepoDir,
+        SecurityContext: initialize.RestrictedSecurityContext(),
+        VolumeMounts:    []corev1.VolumeMount{pgbackrest.RepoVolumeMount()},
+    }
+    if cluster.Spec.Backups.PGBackRest.RepoHost != nil {
+        container.Resources = cluster.Spec.Backups.PGBackRest.RepoHost.Resources
+    }
+
+    jobSpec := &batchv1.JobSpec{
+        Template: corev1.PodTemplateSpec{
+            ObjectMeta: metav1.ObjectMeta{Labels: labels},
+            Spec: corev1.PodSpec{
+                // Set the image pull secrets, if any exist.
+                // This is set here rather than using the service account due to the lack
+                // of propagation to existing pods when the CRD is updated:
+                // https://github.com/kubernetes/kubernetes/issues/88456
+                ImagePullSecrets: cluster.Spec.ImagePullSecrets,
+                Containers:       []corev1.Container{container},
+                SecurityContext:  postgres.PodSecurityContext(cluster),
+                // Set RestartPolicy to "Never" since we want a new Pod to be created by the Job
+                // controller when there is a failure (instead of the container simply restarting).
+                RestartPolicy: corev1.RestartPolicyNever,
+                // These Jobs don't make Kubernetes API calls, so we can just
+                // use the default ServiceAccount and not mount its credentials.
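+                // Like the other directory move Jobs, this Pod only rearranges
+                // files on the mounted volume; it never contacts the API server.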
+                AutomountServiceAccountToken: initialize.Bool(false),
+                EnableServiceLinks:           initialize.Bool(false),
+                Volumes: []corev1.Volume{{
+                    Name: "pgbackrest-repo",
+                    VolumeSource: corev1.VolumeSource{
+                        PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+                            ClaimName: cluster.Spec.DataSource.Volumes.
+                                PGBackRestVolume.PVCName,
+                        },
+                    }},
+                },
+            },
+        },
+    }
+    // set the priority class name, if it exists
+    if repoHost := cluster.Spec.Backups.PGBackRest.RepoHost; repoHost != nil {
+        jobSpec.Template.Spec.PriorityClassName = initialize.FromPointer(repoHost.PriorityClassName)
+    }
+    moveDirJob.Spec = *jobSpec
+
+    // set gvk and ownership refs
+    moveDirJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job"))
+    if err := controllerutil.SetControllerReference(cluster, moveDirJob,
+        r.Client.Scheme()); err != nil {
+        return true, errors.WithStack(err)
+    }
+
+    // server-side apply the directory move Job intent
+    if err := r.apply(ctx, moveDirJob); err != nil {
+        return true, errors.WithStack(err)
+    }
+    return true, nil
+}
+
 // handlePersistentVolumeClaimError inspects err for expected Kubernetes API
 // responses to writing a PVC. It turns errors it understands into conditions
 // and events. When err is handled it returns nil. Otherwise it returns err.
@@ -141,7 +775,6 @@ func (r *Reconciler) handlePersistentVolumeClaimError(
             Message:            "One or more volumes cannot be resized",
             ObservedGeneration: cluster.Generation,
-            LastTransitionTime: metav1.Now(),
         })
     }
diff --git a/internal/controller/postgrescluster/volumes_test.go b/internal/controller/postgrescluster/volumes_test.go
index f6825f5894..96eef5f916 100644
--- a/internal/controller/postgrescluster/volumes_test.go
+++ b/internal/controller/postgrescluster/volumes_test.go
@@ -1,474 +1,243 @@
-// +build envtest
-
-/*
- Copyright 2021 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" "errors" - "os" - "strings" "testing" "time" "gotest.tools/v3/assert" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/events" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -func TestPersistentVolumeClaimLimitations(t *testing.T) { - if !strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { - t.Skip("requires a running persistent volume controller") +func TestHandlePersistentVolumeClaimError(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{ + Recorder: recorder, } - ctx := context.Background() - tEnv, cc, _ := setupTestEnv(t, t.Name()) - t.Cleanup(func() { teardownTestEnv(t, tEnv) }) - - ns := &corev1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - ns.Labels = map[string]string{"postgres-operator-test": t.Name()} - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, ns)) }) - - // Stub to see that handlePersistentVolumeClaimError returns nil. cluster := new(v1beta1.PostgresCluster) - reconciler := &Reconciler{ - Recorder: new(record.FakeRecorder), - } + cluster.Namespace = "ns1" + cluster.Name = "pg2" - apiErrorStatus := func(t testing.TB, err error) metav1.Status { - t.Helper() - var status apierrors.APIStatus - assert.Assert(t, errors.As(err, &status)) - return status.Status() + reset := func() { + cluster.Status.Conditions = cluster.Status.Conditions[:0] + recorder.Events = recorder.Events[:0] } - // NOTE(cbandy): use multiples of 1Gi below to stay compatible with AWS, GCP, etc. - - // Statically provisioned volumes cannot be resized. The API response depends - // on the phase of the volume claim. - t.Run("StaticNoResize", func(t *testing.T) { - // A static PVC is one with a present-and-blank storage class. - // - https://docs.k8s.io/concepts/storage/persistent-volumes/#static - // - https://docs.k8s.io/concepts/storage/persistent-volumes/#class-1 - base := &corev1.PersistentVolumeClaim{} - assert.NilError(t, yaml.Unmarshal([]byte(`{ - spec: { - storageClassName: "", - accessModes: [ReadWriteOnce], - selector: { matchLabels: { postgres-operator-test: static-no-resize } }, - resources: { requests: { storage: 2Gi } }, - }, - }`), base)) - base.Namespace = ns.Name - - t.Run("Pending", func(t *testing.T) { - // No persistent volume for this claim. - pvc := base.DeepCopy() - pvc.Name = "static-pvc-pending" - assert.NilError(t, cc.Create(ctx, pvc)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, pvc)) }) - - // Not able to shrink the storage request. 
- pvc.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("1Gi") - - err := cc.Update(ctx, pvc) - assert.Assert(t, apierrors.IsInvalid(err), "expected Invalid, got\n%#v", err) - assert.ErrorContains(t, err, "less than previous") - assert.ErrorContains(t, err, pvc.Name, "expected mention of the object") - - status := apiErrorStatus(t, err) - assert.Assert(t, status.Details != nil) - assert.Assert(t, len(status.Details.Causes) != 0) - assert.Equal(t, status.Details.Causes[0].Field, "spec") - assert.Equal(t, status.Details.Causes[0].Type, metav1.CauseType(field.ErrorTypeForbidden)) + // It returns any error it does not recognize completely. + t.Run("Unexpected", func(t *testing.T) { + t.Cleanup(reset) - assert.NilError(t, reconciler.handlePersistentVolumeClaimError(cluster, err)) + err := errors.New("whomp") - // Not able to grow the storage request. - pvc.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("4Gi") + assert.Equal(t, err, reconciler.handlePersistentVolumeClaimError(cluster, err)) + assert.Assert(t, len(cluster.Status.Conditions) == 0) + assert.Assert(t, len(recorder.Events) == 0) - err = cc.Update(ctx, pvc) - assert.Assert(t, apierrors.IsInvalid(err), "expected Invalid, got\n%#v", err) - assert.ErrorContains(t, err, "bound claim") - assert.ErrorContains(t, err, pvc.Name, "expected mention of the object") + err = apierrors.NewInvalid( + corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim").GroupKind(), + "some-pvc", + field.ErrorList{ + field.Forbidden(field.NewPath("metadata"), "dunno"), + }) - status = apiErrorStatus(t, err) - assert.Assert(t, status.Details != nil) - assert.Assert(t, len(status.Details.Causes) != 0) - assert.Equal(t, status.Details.Causes[0].Field, "spec") - assert.Equal(t, status.Details.Causes[0].Type, metav1.CauseType(field.ErrorTypeForbidden)) + assert.Equal(t, err, reconciler.handlePersistentVolumeClaimError(cluster, err)) + assert.Assert(t, len(cluster.Status.Conditions) == 0) + assert.Assert(t, len(recorder.Events) == 0) + }) + // Neither statically nor dynamically provisioned claims can be resized + // before they are bound to a persistent volume. Kubernetes rejects such + // changes during PVC validation. + // + // A static PVC is one with a present-and-blank storage class. It is + // pending until a PV exists that matches its selector, requests, etc. + // - https://docs.k8s.io/concepts/storage/persistent-volumes/#static + // - https://docs.k8s.io/concepts/storage/persistent-volumes/#class-1 + // + // A dynamic PVC is associated with a storage class. Storage classes that + // "WaitForFirstConsumer" do not bind a PV until there is a pod. + // - https://docs.k8s.io/concepts/storage/persistent-volumes/#dynamic + t.Run("Pending", func(t *testing.T) { + t.Run("Grow", func(t *testing.T) { + t.Cleanup(reset) + + err := apierrors.NewInvalid( + corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim").GroupKind(), + "my-pending-pvc", + field.ErrorList{ + // - https://releases.k8s.io/v1.24.0/pkg/apis/core/validation/validation.go#L2184 + field.Forbidden(field.NewPath("spec"), "… immutable … bound claim …"), + }) + + // PVCs will bind eventually. This error should become an event without a condition. assert.NilError(t, reconciler.handlePersistentVolumeClaimError(cluster, err)) - }) - - t.Run("Bound", func(t *testing.T) { - // A persistent volume that will match the claim. 
- pv := &corev1.PersistentVolume{} - assert.NilError(t, yaml.Unmarshal([]byte(`{ - metadata: { - generateName: postgres-operator-test-, - labels: { postgres-operator-test: static-no-resize }, - }, - spec: { - accessModes: [ReadWriteOnce], - capacity: { storage: 4Gi }, - hostPath: { path: /tmp }, - persistentVolumeReclaimPolicy: Delete, - }, - }`), pv)) - - assert.NilError(t, cc.Create(ctx, pv)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, pv)) }) - - assert.NilError(t, wait.PollImmediate(time.Second, Scale(10*time.Second), func() (bool, error) { - err := cc.Get(ctx, client.ObjectKeyFromObject(pv), pv) - return pv.Status.Phase != corev1.VolumePending, err - }), "expected Available, got %#v", pv.Status) - pvc := base.DeepCopy() - pvc.Name = "static-pvc-bound" - assert.NilError(t, cc.Create(ctx, pvc)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, pvc)) }) - - assert.NilError(t, wait.PollImmediate(time.Second, Scale(10*time.Second), func() (bool, error) { - err := cc.Get(ctx, client.ObjectKeyFromObject(pvc), pvc) - return pvc.Status.Phase != corev1.ClaimPending, err - }), "expected Bound, got %#v", pvc.Status) + assert.Check(t, len(cluster.Status.Conditions) == 0) + assert.Check(t, len(recorder.Events) > 0) + + for _, event := range recorder.Events { + assert.Equal(t, event.Type, "Warning") + assert.Equal(t, event.Reason, "PersistentVolumeError") + assert.Assert(t, cmp.Contains(event.Note, "PersistentVolumeClaim")) + assert.Assert(t, cmp.Contains(event.Note, "my-pending-pvc")) + assert.Assert(t, cmp.Contains(event.Note, "bound claim")) + assert.DeepEqual(t, event.Regarding, corev1.ObjectReference{ + APIVersion: v1beta1.GroupVersion.Identifier(), + Kind: "PostgresCluster", + Namespace: "ns1", Name: "pg2", + }) + } + }) - // Not able to shrink the storage request. - pvc.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("1Gi") + t.Run("Shrink", func(t *testing.T) { + t.Cleanup(reset) - err := cc.Update(ctx, pvc) - assert.Assert(t, apierrors.IsInvalid(err), "expected Invalid, got\n%#v", err) - assert.ErrorContains(t, err, "less than previous") - assert.ErrorContains(t, err, pvc.Name, "expected mention of the object") + // Requests to make a pending PVC smaller fail for multiple reasons. + err := apierrors.NewInvalid( + corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim").GroupKind(), + "my-pending-pvc", + field.ErrorList{ + // - https://releases.k8s.io/v1.24.0/pkg/apis/core/validation/validation.go#L2184 + field.Forbidden(field.NewPath("spec"), "… immutable … bound claim …"), - status := apiErrorStatus(t, err) - assert.Assert(t, status.Details != nil) - assert.Assert(t, len(status.Details.Causes) != 0) - assert.Equal(t, status.Details.Causes[0].Field, "spec.resources.requests.storage") - assert.Equal(t, status.Details.Causes[0].Type, metav1.CauseType(field.ErrorTypeForbidden)) + // - https://releases.k8s.io/v1.24.0/pkg/apis/core/validation/validation.go#L2188 + field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "… not be less …"), + }) + // PVCs will bind eventually, but the size is rejected. assert.NilError(t, reconciler.handlePersistentVolumeClaimError(cluster, err)) - // Not able to grow the storage request. 
- pvc.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("4Gi") + assert.Check(t, len(cluster.Status.Conditions) > 0) + assert.Check(t, len(recorder.Events) > 0) - err = cc.Update(ctx, pvc) - assert.Assert(t, apierrors.IsForbidden(err), "expected Forbidden, got\n%#v", err) - assert.ErrorContains(t, err, "only dynamic") - assert.ErrorContains(t, err, pvc.Name, "expected mention of the object") + for _, condition := range cluster.Status.Conditions { + assert.Equal(t, condition.Type, "PersistentVolumeResizing") + assert.Equal(t, condition.Status, metav1.ConditionFalse) + assert.Equal(t, condition.Reason, "Invalid") + assert.Assert(t, cmp.Contains(condition.Message, "cannot be resized")) + } - assert.NilError(t, reconciler.handlePersistentVolumeClaimError(cluster, err)) + for _, event := range recorder.Events { + assert.Equal(t, event.Type, "Warning") + assert.Equal(t, event.Reason, "PersistentVolumeError") + assert.Assert(t, cmp.Contains(event.Note, "PersistentVolumeClaim")) + assert.Assert(t, cmp.Contains(event.Note, "my-pending-pvc")) + assert.Assert(t, cmp.Contains(event.Note, "bound claim")) + assert.Assert(t, cmp.Contains(event.Note, "not be less")) + assert.DeepEqual(t, event.Regarding, corev1.ObjectReference{ + APIVersion: v1beta1.GroupVersion.Identifier(), + Kind: "PostgresCluster", + Namespace: "ns1", Name: "pg2", + }) + } }) }) - // Dynamically provisioned volumes can be resized under certain conditions. - // The API response depends on the phase of the volume claim. - // - https://releases.k8s.io/v1.21.0/plugin/pkg/admission/storage/persistentvolume/resize/admission.go - t.Run("Dynamic", func(t *testing.T) { - // Create a claim without a storage class to detect the default. - find := &corev1.PersistentVolumeClaim{} - assert.NilError(t, yaml.Unmarshal([]byte(`{ - spec: { - accessModes: [ReadWriteOnce], - selector: { matchLabels: { postgres-operator-test: find-dynamic } }, - resources: { requests: { storage: 1Gi } }, - }, - }`), find)) - find.Namespace, find.Name = ns.Name, "find-dynamic" - - assert.NilError(t, cc.Create(ctx, find)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, find)) }) - - if find.Spec.StorageClassName == nil { - t.Skip("requires a default storage class and expansion controller") - } - - base := &storagev1.StorageClass{} - base.Name = *find.Spec.StorageClassName - - if err := cc.Get(ctx, client.ObjectKeyFromObject(base), base); err != nil { - t.Skipf("requires a default storage class, got\n%#v", err) - } - - t.Run("Pending", func(t *testing.T) { - // A storage class that will not bind until there is a pod. 
- sc := base.DeepCopy() - sc.ObjectMeta = metav1.ObjectMeta{ - GenerateName: "postgres-operator-test-", - Labels: map[string]string{ - "postgres-operator-test": "pvc-limitations-pending", - }, - } - sc.ReclaimPolicy = new(corev1.PersistentVolumeReclaimPolicy) - *sc.ReclaimPolicy = corev1.PersistentVolumeReclaimDelete - sc.VolumeBindingMode = new(storagev1.VolumeBindingMode) - *sc.VolumeBindingMode = storagev1.VolumeBindingWaitForFirstConsumer - - assert.NilError(t, cc.Create(ctx, sc)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, sc)) }) - - pvc := &corev1.PersistentVolumeClaim{} - assert.NilError(t, yaml.Unmarshal([]byte(`{ - spec: { - accessModes: [ReadWriteOnce], - resources: { requests: { storage: 2Gi } }, - }, - }`), pvc)) - pvc.Namespace, pvc.Name = ns.Name, "dynamic-pvc-pending" - pvc.Spec.StorageClassName = &sc.Name - - assert.NilError(t, cc.Create(ctx, pvc)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, pvc)) }) - - // Not able to shrink the storage request. - pvc.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("1Gi") - - err := cc.Update(ctx, pvc) - assert.Assert(t, apierrors.IsInvalid(err), "expected Invalid, got\n%#v", err) - assert.ErrorContains(t, err, "less than previous") - assert.ErrorContains(t, err, pvc.Name, "expected mention of the object") - - status := apiErrorStatus(t, err) - assert.Assert(t, status.Details != nil) - assert.Assert(t, len(status.Details.Causes) != 0) - assert.Equal(t, status.Details.Causes[0].Field, "spec") - assert.Equal(t, status.Details.Causes[0].Type, metav1.CauseType(field.ErrorTypeForbidden)) - + // Statically provisioned claims cannot be resized. Kubernetes responds + // differently based on the size growing or shrinking. + // + // Dynamically provisioned claims of storage classes that do *not* + // "allowVolumeExpansion" behave the same way. + t.Run("NoExpansion", func(t *testing.T) { + t.Run("Grow", func(t *testing.T) { + t.Cleanup(reset) + + // - https://releases.k8s.io/v1.24.0/plugin/pkg/admission/storage/persistentvolume/resize/admission.go#L108 + err := apierrors.NewForbidden( + corev1.Resource("persistentvolumeclaims"), "my-static-pvc", + errors.New("… only dynamically provisioned …")) + + // This PVC cannot resize. The error should become an event and condition. assert.NilError(t, reconciler.handlePersistentVolumeClaimError(cluster, err)) - // Not able to grow the storage request. 
- pvc.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("4Gi") + assert.Check(t, len(cluster.Status.Conditions) > 0) + assert.Check(t, len(recorder.Events) > 0) - err = cc.Update(ctx, pvc) - assert.Assert(t, apierrors.IsInvalid(err), "expected Invalid, got\n%#v", err) - assert.ErrorContains(t, err, "bound claim") - assert.ErrorContains(t, err, pvc.Name, "expected mention of the object") - - status = apiErrorStatus(t, err) - assert.Assert(t, status.Details != nil) - assert.Assert(t, len(status.Details.Causes) != 0) - assert.Equal(t, status.Details.Causes[0].Field, "spec") - assert.Equal(t, status.Details.Causes[0].Type, metav1.CauseType(field.ErrorTypeForbidden)) + for _, condition := range cluster.Status.Conditions { + assert.Equal(t, condition.Type, "PersistentVolumeResizing") + assert.Equal(t, condition.Status, metav1.ConditionFalse) + assert.Equal(t, condition.Reason, "Forbidden") + assert.Assert(t, cmp.Contains(condition.Message, "cannot be resized")) + } - assert.NilError(t, reconciler.handlePersistentVolumeClaimError(cluster, err)) + for _, event := range recorder.Events { + assert.Equal(t, event.Type, "Warning") + assert.Equal(t, event.Reason, "PersistentVolumeError") + assert.Assert(t, cmp.Contains(event.Note, "persistentvolumeclaim")) + assert.Assert(t, cmp.Contains(event.Note, "my-static-pvc")) + assert.Assert(t, cmp.Contains(event.Note, "only dynamic")) + assert.DeepEqual(t, event.Regarding, corev1.ObjectReference{ + APIVersion: v1beta1.GroupVersion.Identifier(), + Kind: "PostgresCluster", + Namespace: "ns1", Name: "pg2", + }) + } }) - t.Run("Bound", func(t *testing.T) { - setup := func(t testing.TB, expansion bool) *corev1.PersistentVolumeClaim { - // A storage class that binds when there is a pod and deletes volumes. - sc := base.DeepCopy() - sc.ObjectMeta = metav1.ObjectMeta{ - GenerateName: "postgres-operator-test-", - Labels: map[string]string{ - "postgres-operator-test": "pvc-limitations-bound", - }, - } - sc.AllowVolumeExpansion = &expansion - sc.ReclaimPolicy = new(corev1.PersistentVolumeReclaimPolicy) - *sc.ReclaimPolicy = corev1.PersistentVolumeReclaimDelete - - assert.NilError(t, cc.Create(ctx, sc)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, sc)) }) - - pvc := &corev1.PersistentVolumeClaim{} - pvc.ObjectMeta = metav1.ObjectMeta{ - Namespace: ns.Name, - GenerateName: "postgres-operator-test-", - Labels: map[string]string{ - "postgres-operator-test": "pvc-limitations-bound", - }, - } - assert.NilError(t, yaml.Unmarshal([]byte(`{ - spec: { - accessModes: [ReadWriteOnce], - resources: { requests: { storage: 2Gi } }, - }, - }`), pvc)) - pvc.Spec.StorageClassName = &sc.Name - - assert.NilError(t, cc.Create(ctx, pvc)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, pvc)) }) - - pod := &corev1.Pod{} - pod.Namespace, pod.Name = ns.Name, pvc.Name - pod.Spec.Containers = []corev1.Container{{ - Name: "any", - Image: CrunchyPostgresHAImage, - Command: []string{"true"}, - VolumeMounts: []corev1.VolumeMount{{ - MountPath: "/tmp", Name: "volume", - }}, - }} - pod.Spec.Volumes = []corev1.Volume{{ - Name: "volume", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvc.Name, - }, - }, - }} - - assert.NilError(t, cc.Create(ctx, pod)) - t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, pod)) }) + // Dynamically provisioned claims of storage classes that *do* + // "allowVolumeExpansion" can grow but cannot shrink. 
Kubernetes + // rejects such changes during PVC validation, just like static claims. + // + // A future version of Kubernetes will allow `spec.resources` to shrink + // so long as it is greater than `status.capacity`. + // - https://git.k8s.io/enhancements/keps/sig-storage/1790-recover-resize-failure + t.Run("Shrink", func(t *testing.T) { + t.Cleanup(reset) + + err := apierrors.NewInvalid( + corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim").GroupKind(), + "my-static-pvc", + field.ErrorList{ + // - https://releases.k8s.io/v1.24.0/pkg/apis/core/validation/validation.go#L2188 + field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "… not be less …"), + }) + + // The PVC size is rejected. This error should become an event and condition. + assert.NilError(t, reconciler.handlePersistentVolumeClaimError(cluster, err)) - assert.NilError(t, wait.PollImmediate(time.Second, Scale(30*time.Second), func() (bool, error) { - err := cc.Get(ctx, client.ObjectKeyFromObject(pvc), pvc) - return pvc.Status.Phase != corev1.ClaimPending, err - }), "expected Bound, got %#v", pvc.Status) + assert.Check(t, len(cluster.Status.Conditions) > 0) + assert.Check(t, len(recorder.Events) > 0) - return pvc + for _, condition := range cluster.Status.Conditions { + assert.Equal(t, condition.Type, "PersistentVolumeResizing") + assert.Equal(t, condition.Status, metav1.ConditionFalse) + assert.Equal(t, condition.Reason, "Invalid") + assert.Assert(t, cmp.Contains(condition.Message, "cannot be resized")) } - t.Run("NoExpansionNoResize", func(t *testing.T) { - pvc := setup(t, false) - - // Not able to shrink the storage request. - pvc.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("1Gi") - - err := cc.Update(ctx, pvc) - assert.Assert(t, apierrors.IsInvalid(err), "expected Invalid, got\n%#v", err) - assert.ErrorContains(t, err, "less than previous") - assert.ErrorContains(t, err, pvc.Name, "expected mention of the object") - - status := apiErrorStatus(t, err) - assert.Assert(t, status.Details != nil) - assert.Assert(t, len(status.Details.Causes) != 0) - assert.Equal(t, status.Details.Causes[0].Field, "spec.resources.requests.storage") - assert.Equal(t, status.Details.Causes[0].Type, metav1.CauseType(field.ErrorTypeForbidden)) - - assert.NilError(t, reconciler.handlePersistentVolumeClaimError(cluster, err)) - - // Not able to grow the storage request. - pvc.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("4Gi") - - err = cc.Update(ctx, pvc) - assert.Assert(t, apierrors.IsForbidden(err), "expected Forbidden, got\n%#v", err) - assert.ErrorContains(t, err, "only dynamic") - assert.ErrorContains(t, err, pvc.Name, "expected mention of the object") - - assert.NilError(t, reconciler.handlePersistentVolumeClaimError(cluster, err)) - }) - - t.Run("ExpansionNoShrink", func(t *testing.T) { - if base.AllowVolumeExpansion == nil || !*base.AllowVolumeExpansion { - t.Skip("requires a default storage class that allows expansion") - } - - // Not able to shrink the storage request. 
- pvc := setup(t, true) - pvc.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("1Gi") - - err := cc.Update(ctx, pvc) - assert.Assert(t, apierrors.IsInvalid(err), "expected Invalid, got\n%#v", err) - assert.ErrorContains(t, err, "less than previous") - assert.ErrorContains(t, err, pvc.Name, "expected mention of the object") - - status := apiErrorStatus(t, err) - assert.Assert(t, status.Details != nil) - assert.Assert(t, len(status.Details.Causes) != 0) - assert.Equal(t, status.Details.Causes[0].Field, "spec.resources.requests.storage") - assert.Equal(t, status.Details.Causes[0].Type, metav1.CauseType(field.ErrorTypeForbidden)) - - assert.NilError(t, reconciler.handlePersistentVolumeClaimError(cluster, err)) - }) - - t.Run("ExpansionResizeConditions", func(t *testing.T) { - if base.AllowVolumeExpansion == nil || !*base.AllowVolumeExpansion { - t.Skip("requires a default storage class that allows expansion") - } - - pvc := setup(t, true) - pvc.Spec.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("4Gi") - assert.NilError(t, cc.Update(ctx, pvc)) - - var condition *corev1.PersistentVolumeClaimCondition - - // When the resize controller sees that `spec.resources != status.capacity`, - // it sets a "Resizing" condition and invokes the storage provider. - // The provider could work very quickly and we miss the condition. - // NOTE(cbandy): The oldest KEP talks about "ResizeStarted", but - // that changed to "Resizing" during the merge to Kubernetes v1.8. - // - https://git.k8s.io/enhancements/keps/sig-storage/284-enable-volume-expansion - // - https://pr.k8s.io/49727#discussion_r136678508 - assert.NilError(t, wait.PollImmediate(time.Second, Scale(10*time.Second), func() (bool, error) { - err := cc.Get(ctx, client.ObjectKeyFromObject(pvc), pvc) - for i := range pvc.Status.Conditions { - if pvc.Status.Conditions[i].Type == corev1.PersistentVolumeClaimResizing { - condition = &pvc.Status.Conditions[i] - } - } - return condition != nil || - equality.Semantic.DeepEqual(pvc.Spec.Resources, pvc.Status.Capacity), err - }), "expected Resizing, got %+v", pvc.Status) - - if condition != nil { - assert.Equal(t, condition.Status, corev1.ConditionTrue, - "expected Resizing, got %+v", condition) - } - - // Kubernetes v1.10 added the "FileSystemResizePending" condition - // to indicate when the storage provider has finished its work. - // When a CSI implementation indicates that it performed the - // *entire* resize, this condition does not appear. - // - https://pr.k8s.io/58415 - // - https://git.k8s.io/enhancements/keps/sig-storage/556-csi-volume-resizing - assert.NilError(t, wait.PollImmediate(time.Second, Scale(30*time.Second), func() (bool, error) { - err := cc.Get(ctx, client.ObjectKeyFromObject(pvc), pvc) - for i := range pvc.Status.Conditions { - if pvc.Status.Conditions[i].Type == corev1.PersistentVolumeClaimFileSystemResizePending { - condition = &pvc.Status.Conditions[i] - } - } - return condition != nil || - equality.Semantic.DeepEqual(pvc.Spec.Resources, pvc.Status.Capacity), err - }), "expected FileSystemResizePending, got %+v", pvc.Status) - - if condition != nil { - assert.Equal(t, condition.Status, corev1.ConditionTrue, - "expected FileSystemResizePending, got %+v", condition) - } - - // Kubernetes v1.15 ("ExpandInUsePersistentVolumes" feature gate) - // will finish the resize of mounted and writable PVCs that have - // the "FileSystemResizePending" condition. When the work is done, - // the condition is removed and `spec.resources == status.capacity`. 
- // - https://git.k8s.io/enhancements/keps/sig-storage/531-online-pv-resizing - - // A future version of Kubernetes will allow `spec.resources` to - // shrink so long as it is greater than `status.capacity`. - // - https://git.k8s.io/enhancements/keps/sig-storage/1790-recover-resize-failure - }) + for _, event := range recorder.Events { + assert.Equal(t, event.Type, "Warning") + assert.Equal(t, event.Reason, "PersistentVolumeError") + assert.Assert(t, cmp.Contains(event.Note, "PersistentVolumeClaim")) + assert.Assert(t, cmp.Contains(event.Note, "my-static-pvc")) + assert.Assert(t, cmp.Contains(event.Note, "not be less")) + assert.DeepEqual(t, event.Regarding, corev1.ObjectReference{ + APIVersion: v1beta1.GroupVersion.Identifier(), + Kind: "PostgresCluster", + Namespace: "ns1", Name: "pg2", + }) + } }) }) } @@ -497,11 +266,11 @@ func TestGetPVCNameMethods(t *testing.T) { naming.LabelCluster: cluster.Name, }, }, - Spec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{ + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ "ReadWriteMany", }, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -526,7 +295,7 @@ func TestGetPVCNameMethods(t *testing.T) { naming.LabelInstance: "testinstance1-abcd", naming.LabelRole: naming.RolePostgresWAL, } - clusterVolumes := []v1.PersistentVolumeClaim{*pgDataPVC, *walPVC} + clusterVolumes := []corev1.PersistentVolumeClaim{*pgDataPVC, *walPVC} repoPVC1 := pvc.DeepCopy() repoPVC1.Name = "testrepovol1" @@ -536,7 +305,7 @@ func TestGetPVCNameMethods(t *testing.T) { naming.LabelPGBackRestRepo: "testrepo1", naming.LabelPGBackRestRepoVolume: "", } - repoPVCs := []*v1.PersistentVolumeClaim{repoPVC1} + repoPVCs := []*corev1.PersistentVolumeClaim{repoPVC1} repoPVC2 := pvc.DeepCopy() repoPVC2.Name = "testrepovol2" @@ -601,3 +370,549 @@ func TestGetPVCNameMethods(t *testing.T) { assert.DeepEqual(t, getRepoPVCNames(cluster, repoPVCs2), expectedMap) }) } + +func TestReconcileConfigureExistingPVCs(t *testing.T) { + ctx := context.Background() + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + + ns := setupNamespace(t, tClient) + cluster := &v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testcluster", + Namespace: ns.GetName(), + }, + Spec: v1beta1.PostgresClusterSpec{ + PostgresVersion: 13, + Image: "example.com/crunchy-postgres-ha:test", + DataSource: &v1beta1.DataSource{ + Volumes: &v1beta1.DataSourceVolumes{}, + }, + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + Name: "instance1", + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }}, + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Image: "example.com/crunchy-pgbackrest:test", + Repos: []v1beta1.PGBackRestRepo{{ + Name: "repo1", + Volume: &v1beta1.RepoPVC{ + VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource. + Quantity{ + corev1.ResourceStorage: resource. 
+ MustParse("1Gi"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + // create base PostgresCluster + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + + t.Run("existing pgdata volume", func(t *testing.T) { + volume := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pgdatavolume", + Namespace: cluster.Namespace, + Labels: map[string]string{ + "somelabel": "labelvalue-pgdata", + }, + }, + Spec: cluster.Spec.InstanceSets[0].DataVolumeClaimSpec, + } + + assert.NilError(t, tClient.Create(ctx, volume)) + + // add the pgData PVC name to the CRD + cluster.Spec.DataSource.Volumes. + PGDataVolume = &v1beta1.DataSourceVolume{ + PVCName: "pgdatavolume", + } + + clusterVolumes, err := r.observePersistentVolumeClaims(ctx, cluster) + assert.NilError(t, err) + // check that created volume does not show up in observed volumes since + // it does not have appropriate labels + assert.Assert(t, len(clusterVolumes) == 0) + + clusterVolumes, err = r.configureExistingPVCs(ctx, cluster, + clusterVolumes) + assert.NilError(t, err) + + // now, check that the label volume is returned + assert.Assert(t, len(clusterVolumes) == 1) + + // observe again, but allow time for the change to be observed + err = wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*15), false, func(ctx context.Context) (bool, error) { + clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster) + return len(clusterVolumes) == 1, err + }) + assert.NilError(t, err) + // check that created volume is now in the list + assert.Assert(t, len(clusterVolumes) == 1) + + // validate the expected labels are in place + // expected volume labels, plus the original label + expected := map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelInstanceSet: cluster.Spec.InstanceSets[0].Name, + naming.LabelInstance: cluster.Status.StartupInstance, + naming.LabelRole: naming.RolePostgresData, + naming.LabelData: naming.DataPostgres, + "somelabel": "labelvalue-pgdata", + } + + // ensure volume is found and labeled correctly + var found bool + for i := range clusterVolumes { + if clusterVolumes[i].Name == cluster.Spec.DataSource.Volumes. 
+ PGDataVolume.PVCName { + found = true + assert.DeepEqual(t, expected, clusterVolumes[i].Labels) + } + } + assert.Assert(t, found) + }) + + t.Run("existing pg_wal volume", func(t *testing.T) { + pgWALVolume := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pgwalvolume", + Namespace: cluster.Namespace, + Labels: map[string]string{ + "somelabel": "labelvalue-pgwal", + }, + }, + Spec: cluster.Spec.InstanceSets[0].DataVolumeClaimSpec, + } + + assert.NilError(t, tClient.Create(ctx, pgWALVolume)) + + // add the pg_wal PVC name to the CRD + cluster.Spec.DataSource.Volumes.PGWALVolume = + &v1beta1.DataSourceVolume{ + PVCName: "pgwalvolume", + } + + clusterVolumes, err := r.observePersistentVolumeClaims(ctx, cluster) + assert.NilError(t, err) + // check that created pgwal volume does not show up in observed volumes + // since it does not have appropriate labels, only the previously created + // pgdata volume should be in the observed list + assert.Assert(t, len(clusterVolumes) == 1) + + clusterVolumes, err = r.configureExistingPVCs(ctx, cluster, + clusterVolumes) + assert.NilError(t, err) + + // now, check that the label volume is returned + assert.Assert(t, len(clusterVolumes) == 2) + + // observe again, but allow time for the change to be observed + err = wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*15), false, func(ctx context.Context) (bool, error) { + clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster) + return len(clusterVolumes) == 2, err + }) + assert.NilError(t, err) + // check that created volume is now in the list + assert.Assert(t, len(clusterVolumes) == 2) + + // validate the expected labels are in place + // expected volume labels, plus the original label + expected := map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelInstanceSet: cluster.Spec.InstanceSets[0].Name, + naming.LabelInstance: cluster.Status.StartupInstance, + naming.LabelRole: naming.RolePostgresWAL, + naming.LabelData: naming.DataPostgres, + "somelabel": "labelvalue-pgwal", + } + + // ensure volume is found and labeled correctly + var found bool + for i := range clusterVolumes { + if clusterVolumes[i].Name == cluster.Spec.DataSource.Volumes. 
+ PGWALVolume.PVCName { + found = true + assert.DeepEqual(t, expected, clusterVolumes[i].Labels) + } + } + assert.Assert(t, found) + }) + + t.Run("existing repo volume", func(t *testing.T) { + volume := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "repovolume", + Namespace: cluster.Namespace, + Labels: map[string]string{ + "somelabel": "labelvalue-repo", + }, + }, + Spec: cluster.Spec.InstanceSets[0].DataVolumeClaimSpec, + } + + assert.NilError(t, tClient.Create(ctx, volume)) + + // add the pgBackRest repo PVC name to the CRD + cluster.Spec.DataSource.Volumes.PGBackRestVolume = + &v1beta1.DataSourceVolume{ + PVCName: "repovolume", + } + + clusterVolumes, err := r.observePersistentVolumeClaims(ctx, cluster) + assert.NilError(t, err) + // check that created volume does not show up in observed volumes since + // it does not have appropriate labels + // check that created pgBackRest repo volume does not show up in observed + // volumes since it does not have appropriate labels, only the previously + // created pgdata and pg_wal volumes should be in the observed list + assert.Assert(t, len(clusterVolumes) == 2) + + clusterVolumes, err = r.configureExistingPVCs(ctx, cluster, + clusterVolumes) + assert.NilError(t, err) + + // now, check that the label volume is returned + assert.Assert(t, len(clusterVolumes) == 3) + + // observe again, but allow time for the change to be observed + err = wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*15), false, func(ctx context.Context) (bool, error) { + clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster) + return len(clusterVolumes) == 3, err + }) + assert.NilError(t, err) + // check that created volume is now in the list + assert.Assert(t, len(clusterVolumes) == 3) + + // validate the expected labels are in place + // expected volume labels, plus the original label + expected := map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelData: naming.DataPGBackRest, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestRepo: "repo1", + naming.LabelPGBackRestRepoVolume: "", + "somelabel": "labelvalue-repo", + } + + // ensure volume is found and labeled correctly + var found bool + for i := range clusterVolumes { + if clusterVolumes[i].Name == cluster.Spec.DataSource.Volumes. 
+ PGBackRestVolume.PVCName { + found = true + assert.DeepEqual(t, expected, clusterVolumes[i].Labels) + } + } + assert.Assert(t, found) + }) +} + +func TestReconcileMoveDirectories(t *testing.T) { + ctx := context.Background() + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + + ns := setupNamespace(t, tClient) + cluster := &v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testcluster", + Namespace: ns.GetName(), + }, + Spec: v1beta1.PostgresClusterSpec{ + PostgresVersion: 13, + Image: "example.com/crunchy-postgres-ha:test", + ImagePullPolicy: corev1.PullAlways, + ImagePullSecrets: []corev1.LocalObjectReference{{ + Name: "test-secret", + }}, + DataSource: &v1beta1.DataSource{ + Volumes: &v1beta1.DataSourceVolumes{ + PGDataVolume: &v1beta1.DataSourceVolume{ + PVCName: "testpgdata", + Directory: "testpgdatadir", + }, + PGWALVolume: &v1beta1.DataSourceVolume{ + PVCName: "testwal", + Directory: "testwaldir", + }, + PGBackRestVolume: &v1beta1.DataSourceVolume{ + PVCName: "testrepo", + Directory: "testrepodir", + }, + }, + }, + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + Name: "instance1", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1m"), + }, + }, + PriorityClassName: initialize.String("some-priority-class"), + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }}, + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Image: "example.com/crunchy-pgbackrest:test", + RepoHost: &v1beta1.PGBackRestRepoHost{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1m"), + }, + }, + PriorityClassName: initialize.String("some-priority-class"), + }, + Repos: []v1beta1.PGBackRestRepo{{ + Name: "repo1", + Volume: &v1beta1.RepoPVC{ + VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource. + Quantity{ + corev1.ResourceStorage: resource. 
+ MustParse("1Gi"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + // create PostgresCluster + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + + returnEarly, err := r.reconcileDirMoveJobs(ctx, cluster) + assert.NilError(t, err) + // returnEarly will initially be true because the Jobs will not have + // completed yet + assert.Assert(t, returnEarly) + + moveJobs := &batchv1.JobList{} + err = r.Client.List(ctx, moveJobs, &client.ListOptions{ + Namespace: cluster.Namespace, + LabelSelector: naming.DirectoryMoveJobLabels(cluster.Name).AsSelector(), + }) + assert.NilError(t, err) + + t.Run("check pgdata move job pod spec", func(t *testing.T) { + + for i := range moveJobs.Items { + if moveJobs.Items[i].Name == "testcluster-move-pgdata-dir" { + compare := ` +automountServiceAccountToken: false +containers: +- command: + - bash + - -ceu + - "echo \"Preparing cluster testcluster volumes for PGO v5.x\"\n echo \"pgdata_pvc=testpgdata\"\n + \ echo \"Current PG data directory volume contents:\" \n ls -lh \"/pgdata\"\n + \ echo \"Now updating PG data directory...\"\n [ -d \"/pgdata/testpgdatadir\" + ] && mv \"/pgdata/testpgdatadir\" \"/pgdata/pg13_bootstrap\"\n rm -f \"/pgdata/pg13/patroni.dynamic.json\"\n + \ echo \"Updated PG data directory contents:\" \n ls -lh \"/pgdata\"\n echo + \"PG Data directory preparation complete\"\n " + image: example.com/crunchy-postgres-ha:test + imagePullPolicy: Always + name: pgdata-move-job + resources: + requests: + cpu: 1m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /pgdata + name: postgres-data +dnsPolicy: ClusterFirst +enableServiceLinks: false +imagePullSecrets: +- name: test-secret +priorityClassName: some-priority-class +restartPolicy: Never +schedulerName: default-scheduler +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +terminationGracePeriodSeconds: 30 +volumes: +- name: postgres-data + persistentVolumeClaim: + claimName: testpgdata + ` + + assert.Assert(t, cmp.MarshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) + } + } + + }) + + t.Run("check pgwal move job pod spec", func(t *testing.T) { + + for i := range moveJobs.Items { + if moveJobs.Items[i].Name == "testcluster-move-pgwal-dir" { + compare := ` +automountServiceAccountToken: false +containers: +- command: + - bash + - -ceu + - "echo \"Preparing cluster testcluster volumes for PGO v5.x\"\n echo \"pg_wal_pvc=testwal\"\n + \ echo \"Current PG WAL directory volume contents:\"\n ls -lh \"/pgwal\"\n + \ echo \"Now updating PG WAL directory...\"\n [ -d \"/pgwal/testwaldir\" + ] && mv \"/pgwal/testwaldir\" \"/pgwal/testcluster-wal\"\n echo \"Updated PG + WAL directory contents:\"\n ls -lh \"/pgwal\"\n echo \"PG WAL directory + preparation complete\"\n " + image: example.com/crunchy-postgres-ha:test + imagePullPolicy: Always + name: pgwal-move-job + resources: + requests: + cpu: 1m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /pgwal + name: postgres-wal +dnsPolicy: ClusterFirst 
+enableServiceLinks: false +imagePullSecrets: +- name: test-secret +priorityClassName: some-priority-class +restartPolicy: Never +schedulerName: default-scheduler +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +terminationGracePeriodSeconds: 30 +volumes: +- name: postgres-wal + persistentVolumeClaim: + claimName: testwal + ` + + assert.Assert(t, cmp.MarshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) + } + } + + }) + + t.Run("check repo move job pod spec", func(t *testing.T) { + + for i := range moveJobs.Items { + if moveJobs.Items[i].Name == "testcluster-move-pgbackrest-repo-dir" { + compare := ` +automountServiceAccountToken: false +containers: +- command: + - bash + - -ceu + - "echo \"Preparing cluster testcluster pgBackRest repo volume for PGO v5.x\"\n + \ echo \"repo_pvc=testrepo\"\n echo \"pgbackrest directory:\"\n ls -lh + /pgbackrest\n echo \"Current pgBackRest repo directory volume contents:\" \n + \ ls -lh \"/pgbackrest/testrepodir\"\n echo \"Now updating repo directory...\"\n + \ [ -d \"/pgbackrest/testrepodir\" ] && mv -t \"/pgbackrest/\" \"/pgbackrest/testrepodir/archive\"\n + \ [ -d \"/pgbackrest/testrepodir\" ] && mv -t \"/pgbackrest/\" \"/pgbackrest/testrepodir/backup\"\n + \ echo \"Updated /pgbackrest directory contents:\"\n ls -lh \"/pgbackrest\"\n + \ echo \"Repo directory preparation complete\"\n " + image: example.com/crunchy-pgbackrest:test + imagePullPolicy: Always + name: repo-move-job + resources: + requests: + cpu: 1m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /pgbackrest + name: pgbackrest-repo +dnsPolicy: ClusterFirst +enableServiceLinks: false +imagePullSecrets: +- name: test-secret +priorityClassName: some-priority-class +restartPolicy: Never +schedulerName: default-scheduler +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +terminationGracePeriodSeconds: 30 +volumes: +- name: pgbackrest-repo + persistentVolumeClaim: + claimName: testrepo + ` + assert.Assert(t, cmp.MarshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) + } + } + + }) +} diff --git a/internal/controller/postgrescluster/watches.go b/internal/controller/postgrescluster/watches.go index 5f4aba14dc..0b5ba5fa87 100644 --- a/internal/controller/postgrescluster/watches.go +++ b/internal/controller/postgrescluster/watches.go @@ -1,21 +1,12 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( + "context" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" @@ -29,7 +20,7 @@ import ( // watchPods returns a handler.EventHandler for Pods. 
func (*Reconciler) watchPods() handler.Funcs { return handler.Funcs{ - UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { labels := e.ObjectNew.GetLabels() cluster := labels[naming.LabelCluster] @@ -43,6 +34,42 @@ func (*Reconciler) watchPods() handler.Funcs { Namespace: e.ObjectNew.GetNamespace(), Name: cluster, }}) + return + } + + // Queue an event when a Patroni pod indicates it needs to restart + // or finished restarting. + if len(cluster) != 0 && + (patroni.PodRequiresRestart(e.ObjectOld) || + patroni.PodRequiresRestart(e.ObjectNew)) { + q.Add(reconcile.Request{NamespacedName: client.ObjectKey{ + Namespace: e.ObjectNew.GetNamespace(), + Name: cluster, + }}) + return + } + + // Queue an event to start applying changes if the PostgreSQL instance + // now has the "master" role. + if len(cluster) != 0 && + !patroni.PodIsPrimary(e.ObjectOld) && + patroni.PodIsPrimary(e.ObjectNew) { + q.Add(reconcile.Request{NamespacedName: client.ObjectKey{ + Namespace: e.ObjectNew.GetNamespace(), + Name: cluster, + }}) + return + } + + oldAnnotations := e.ObjectOld.GetAnnotations() + newAnnotations := e.ObjectNew.GetAnnotations() + // If the suggested-pgdata-pvc-size annotation is added or changes, reconcile. + if len(cluster) != 0 && oldAnnotations["suggested-pgdata-pvc-size"] != newAnnotations["suggested-pgdata-pvc-size"] { + q.Add(reconcile.Request{NamespacedName: client.ObjectKey{ + Namespace: e.ObjectNew.GetNamespace(), + Name: cluster, + }}) + return } }, } diff --git a/internal/controller/postgrescluster/watches_test.go b/internal/controller/postgrescluster/watches_test.go index 7ba2a5b852..fdea498862 100644 --- a/internal/controller/postgrescluster/watches_test.go +++ b/internal/controller/postgrescluster/watches_test.go @@ -1,21 +1,11 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( + "context" "testing" "gotest.tools/v3/assert" @@ -28,21 +18,22 @@ import ( ) func TestWatchPodsUpdate(t *testing.T) { - queue := controllertest.Queue{Interface: workqueue.New()} + ctx := context.Background() + queue := &controllertest.Queue{Interface: workqueue.New()} reconciler := &Reconciler{} update := reconciler.watchPods().UpdateFunc assert.Assert(t, update != nil) // No metadata; no reconcile. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: &corev1.Pod{}, ObjectNew: &corev1.Pod{}, }, queue) assert.Equal(t, queue.Len(), 0) - // Cluster label, but not Patroni standby leader; no reconcile. - update(event.UpdateEvent{ + // Cluster label, but nothing else; no reconcile. 
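+    // (The watch only queues a reconcile for Patroni standby-leader changes,
+    // pending restarts, a newly promoted primary, or a changed
+    // "suggested-pgdata-pvc-size" annotation.)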
+ update(ctx, event.UpdateEvent{ ObjectOld: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ @@ -61,7 +52,7 @@ func TestWatchPodsUpdate(t *testing.T) { assert.Equal(t, queue.Len(), 0) // Cluster standby leader changed; one reconcile by label. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -89,4 +80,105 @@ func TestWatchPodsUpdate(t *testing.T) { expected.Namespace = "some-ns" expected.Name = "starfish" assert.Equal(t, item, expected) + queue.Done(item) + + t.Run("PendingRestart", func(t *testing.T) { + expected := reconcile.Request{} + expected.Namespace = "some-ns" + expected.Name = "starfish" + + base := &corev1.Pod{} + base.Namespace = "some-ns" + base.Labels = map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + } + + pending := base.DeepCopy() + pending.Annotations = map[string]string{ + "status": `{"pending_restart":true}`, + } + + // Newly pending; one reconcile by label. + update(ctx, event.UpdateEvent{ + ObjectOld: base.DeepCopy(), + ObjectNew: pending.DeepCopy(), + }, queue) + assert.Equal(t, queue.Len(), 1, "expected one reconcile") + + item, _ := queue.Get() + assert.Equal(t, item, expected) + queue.Done(item) + + // Still pending; one reconcile by label. + update(ctx, event.UpdateEvent{ + ObjectOld: pending.DeepCopy(), + ObjectNew: pending.DeepCopy(), + }, queue) + assert.Equal(t, queue.Len(), 1, "expected one reconcile") + + item, _ = queue.Get() + assert.Equal(t, item, expected) + queue.Done(item) + + // No longer pending; one reconcile by label. + update(ctx, event.UpdateEvent{ + ObjectOld: pending.DeepCopy(), + ObjectNew: base.DeepCopy(), + }, queue) + assert.Equal(t, queue.Len(), 1, "expected one reconcile") + + item, _ = queue.Get() + assert.Equal(t, item, expected) + queue.Done(item) + }) + + // Pod annotation with arbitrary key; no reconcile. + update(ctx, event.UpdateEvent{ + ObjectOld: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "clortho": "vince", + }, + Labels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + }, + }, + }, + ObjectNew: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "clortho": "vin", + }, + Labels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + }, + }, + }, + }, queue) + assert.Equal(t, queue.Len(), 0) + + // Pod annotation with suggested-pgdata-pvc-size; reconcile. + update(ctx, event.UpdateEvent{ + ObjectOld: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "suggested-pgdata-pvc-size": "5000Mi", + }, + Labels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + }, + }, + }, + ObjectNew: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "suggested-pgdata-pvc-size": "8000Mi", + }, + Labels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + }, + }, + }, + }, queue) + assert.Equal(t, queue.Len(), 1) } diff --git a/internal/controller/runtime/client.go b/internal/controller/runtime/client.go new file mode 100644 index 0000000000..4cc05c9835 --- /dev/null +++ b/internal/controller/runtime/client.go @@ -0,0 +1,76 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Types that implement single methods of the [client.Reader] interface. +type ( + ClientGet func(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error + ClientList func(context.Context, client.ObjectList, ...client.ListOption) error +) + +// ClientReader implements [client.Reader] by composing assignable functions. +type ClientReader struct { + ClientGet + ClientList +} + +var _ client.Reader = ClientReader{} + +// Types that implement single methods of the [client.Writer] interface. +type ( + ClientCreate func(context.Context, client.Object, ...client.CreateOption) error + ClientDelete func(context.Context, client.Object, ...client.DeleteOption) error + ClientPatch func(context.Context, client.Object, client.Patch, ...client.PatchOption) error + ClientDeleteAll func(context.Context, client.Object, ...client.DeleteAllOfOption) error + ClientUpdate func(context.Context, client.Object, ...client.UpdateOption) error +) + +// ClientWriter implements [client.Writer] by composing assignable functions. +type ClientWriter struct { + ClientCreate + ClientDelete + ClientDeleteAll + ClientPatch + ClientUpdate +} + +var _ client.Writer = ClientWriter{} + +// NOTE: The following implementations can go away following https://go.dev/issue/47487. +// The function types above would become single-method interfaces. + +func (fn ClientCreate) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + return fn(ctx, obj, opts...) +} + +func (fn ClientDelete) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + return fn(ctx, obj, opts...) +} + +func (fn ClientDeleteAll) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + return fn(ctx, obj, opts...) +} + +func (fn ClientGet) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + return fn(ctx, key, obj, opts...) +} + +func (fn ClientList) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + return fn(ctx, list, opts...) +} + +func (fn ClientPatch) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + return fn(ctx, obj, patch, opts...) +} + +func (fn ClientUpdate) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + return fn(ctx, obj, opts...) +} diff --git a/internal/controller/postgrescluster/pod_client.go b/internal/controller/runtime/pod_client.go similarity index 53% rename from internal/controller/postgrescluster/pod_client.go rename to internal/controller/runtime/pod_client.go index b3b2109b87..e842601aa7 100644 --- a/internal/controller/postgrescluster/pod_client.go +++ b/internal/controller/runtime/pod_client.go @@ -1,24 +1,14 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
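// The ClientReader/ClientWriter composition above makes it easy to stub out
// pieces of the client interfaces in tests. A minimal sketch, not part of the
// patch: the example package name, the fakeReader helper, and the canned
// error are assumptions for illustration only.
package example

import (
	"context"
	"errors"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
)

// fakeReader builds a client.Reader whose Get always fails and whose List
// always succeeds, by assigning the embedded function fields directly.
func fakeReader() client.Reader {
	return runtime.ClientReader{
		ClientGet: func(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error {
			return errors.New("canned get error")
		},
		ClientList: func(context.Context, client.ObjectList, ...client.ListOption) error {
			return nil
		},
	}
}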
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package postgrescluster +package runtime import ( + "context" "io" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -29,29 +19,33 @@ import ( // podExecutor runs command on container in pod in namespace. Non-nil streams // (stdin, stdout, and stderr) are attached the to the remote process. type podExecutor func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error func newPodClient(config *rest.Config) (rest.Interface, error) { codecs := serializer.NewCodecFactory(scheme.Scheme) - gvk, _ := apiutil.GVKForObject(&v1.Pod{}, scheme.Scheme) - return apiutil.RESTClientForGVK(gvk, false, config, codecs) + gvk, _ := apiutil.GVKForObject(&corev1.Pod{}, scheme.Scheme) + httpClient, err := rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + return apiutil.RESTClientForGVK(gvk, false, config, codecs, httpClient) } -// +kubebuilder:rbac:groups="",resources=pods/exec,verbs=create +// +kubebuilder:rbac:groups="",resources="pods/exec",verbs={create} -func newPodExecutor(config *rest.Config) (podExecutor, error) { +func NewPodExecutor(config *rest.Config) (podExecutor, error) { client, err := newPodClient(config) return func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { request := client.Post(). Resource("pods").SubResource("exec"). Namespace(namespace).Name(pod). - VersionedParams(&v1.PodExecOptions{ + VersionedParams(&corev1.PodExecOptions{ Container: container, Command: command, Stdin: stdin != nil, @@ -62,7 +56,7 @@ func newPodExecutor(config *rest.Config) (podExecutor, error) { exec, err := remotecommand.NewSPDYExecutor(config, "POST", request.URL()) if err == nil { - err = exec.Stream(remotecommand.StreamOptions{ + err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ Stdin: stdin, Stdout: stdout, Stderr: stderr, diff --git a/internal/controller/runtime/reconcile.go b/internal/controller/runtime/reconcile.go new file mode 100644 index 0000000000..a2196d1626 --- /dev/null +++ b/internal/controller/runtime/reconcile.go @@ -0,0 +1,69 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "time" + + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ErrorWithBackoff returns a Result and error that indicate a non-nil err +// should be logged and measured and its [reconcile.Request] should be retried +// later. When err is nil, nothing is logged and the Request is not retried. +// When err unwraps to [reconcile.TerminalError], the Request is not retried. +func ErrorWithBackoff(err error) (reconcile.Result, error) { + // Result should be zero to avoid warning messages. + return reconcile.Result{}, err + + // When error is not nil and not a TerminalError, the controller-runtime Controller + // puts [reconcile.Request] back into the workqueue using AddRateLimited. 
+ // - https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.4/pkg/internal/controller/controller.go#L317 + // - https://pkg.go.dev/k8s.io/client-go/util/workqueue#RateLimitingInterface +} + +// ErrorWithoutBackoff returns a Result and error that indicate a non-nil err +// should be logged and measured without retrying its [reconcile.Request]. +// When err is nil, nothing is logged and the Request is not retried. +func ErrorWithoutBackoff(err error) (reconcile.Result, error) { + if err != nil { + err = reconcile.TerminalError(err) + } + + // Result should be zero to avoid warning messages. + return reconcile.Result{}, err + + // When error is a TerminalError, the controller-runtime Controller increments + // a counter rather than interact with the workqueue. + // - https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.4/pkg/internal/controller/controller.go#L314 +} + +// RequeueWithBackoff returns a Result that indicates a [reconcile.Request] +// should be retried later. +func RequeueWithBackoff() reconcile.Result { + return reconcile.Result{Requeue: true} + + // When [reconcile.Result].Requeue is true, the controller-runtime Controller + // puts [reconcile.Request] back into the workqueue using AddRateLimited. + // - https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.4/pkg/internal/controller/controller.go#L334 + // - https://pkg.go.dev/k8s.io/client-go/util/workqueue#RateLimitingInterface +} + +// RequeueWithoutBackoff returns a Result that indicates a [reconcile.Request] +// should be retried on or before delay. +func RequeueWithoutBackoff(delay time.Duration) reconcile.Result { + // RequeueAfter must be positive to not backoff. + if delay <= 0 { + delay = time.Nanosecond + } + + // RequeueAfter implies Requeue, but set both to remove any ambiguity. + return reconcile.Result{Requeue: true, RequeueAfter: delay} + + // When [reconcile.Result].RequeueAfter is positive, the controller-runtime Controller + // puts [reconcile.Request] back into the workqueue using AddAfter. + // - https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.4/pkg/internal/controller/controller.go#L325 + // - https://pkg.go.dev/k8s.io/client-go/util/workqueue#DelayingInterface +} diff --git a/internal/controller/runtime/reconcile_test.go b/internal/controller/runtime/reconcile_test.go new file mode 100644 index 0000000000..925b3cf47d --- /dev/null +++ b/internal/controller/runtime/reconcile_test.go @@ -0,0 +1,57 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
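// A minimal usage sketch for the reconcile helpers above, not part of the
// patch. The exampleReconciler type and its error/flag variables are
// assumptions standing in for real lookup and validation logic.
package example

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
)

type exampleReconciler struct{}

func (exampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Placeholders for the outcome of fetching and validating the object.
	var transientErr, terminalErr error
	var waitingOnExternalSystem bool

	switch {
	case transientErr != nil:
		// Log, measure, and retry the request with rate-limited backoff.
		return runtime.ErrorWithBackoff(transientErr)
	case terminalErr != nil:
		// Log and measure, but do not put the request back in the queue.
		return runtime.ErrorWithoutBackoff(terminalErr)
	case waitingOnExternalSystem:
		// Check again in about a minute without exponential backoff.
		return runtime.RequeueWithoutBackoff(time.Minute), nil
	}
	return ctrl.Result{}, nil
}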
+// +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "errors" + "testing" + "time" + + "gotest.tools/v3/assert" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func TestErrorWithBackoff(t *testing.T) { + result, err := ErrorWithBackoff(nil) + assert.Assert(t, result.IsZero()) + assert.NilError(t, err) + + expected := errors.New("doot") + result, err = ErrorWithBackoff(expected) + assert.Assert(t, result.IsZero()) + assert.Equal(t, err, expected) +} + +func TestErrorWithoutBackoff(t *testing.T) { + result, err := ErrorWithoutBackoff(nil) + assert.Assert(t, result.IsZero()) + assert.NilError(t, err) + + expected := errors.New("doot") + result, err = ErrorWithoutBackoff(expected) + assert.Assert(t, result.IsZero()) + assert.Assert(t, errors.Is(err, reconcile.TerminalError(nil))) + assert.Equal(t, errors.Unwrap(err), expected) +} + +func TestRequeueWithBackoff(t *testing.T) { + result := RequeueWithBackoff() + assert.Assert(t, result.Requeue) + assert.Assert(t, result.RequeueAfter == 0) +} + +func TestRequeueWithoutBackoff(t *testing.T) { + result := RequeueWithoutBackoff(0) + assert.Assert(t, result.Requeue) + assert.Assert(t, result.RequeueAfter > 0) + + result = RequeueWithoutBackoff(-1) + assert.Assert(t, result.Requeue) + assert.Assert(t, result.RequeueAfter > 0) + + result = RequeueWithoutBackoff(time.Minute) + assert.Assert(t, result.Requeue) + assert.Equal(t, result.RequeueAfter, time.Minute) +} diff --git a/internal/controller/runtime/runtime.go b/internal/controller/runtime/runtime.go index 153cb07e33..34bfeabf61 100644 --- a/internal/controller/runtime/runtime.go +++ b/internal/controller/runtime/runtime.go @@ -1,86 +1,76 @@ -package runtime - -/* -Copyright 2021 Crunchy Data -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +package runtime import ( - "time" + "context" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" + + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ) -// default refresh interval in minutes -var refreshInterval = 60 * time.Minute +type ( + CacheConfig = cache.Config + Manager = manager.Manager + Options = manager.Options +) -// CreateRuntimeManager creates a new controller runtime manager for the PostgreSQL Operator. The -// manager returned is configured specifically for the PostgreSQL Operator, and includes any -// controllers that will be responsible for managing PostgreSQL clusters using the -// 'postgrescluster' custom resource. 
Additionally, the manager will only watch for resources in -// the namespace specified, with an empty string resulting in the manager watching all namespaces. -func CreateRuntimeManager(namespace string, config *rest.Config, - disableMetrics bool) (manager.Manager, error) { +// Scheme associates standard Kubernetes API objects and PGO API objects with Go structs. +var Scheme *runtime.Scheme = runtime.NewScheme() - pgoScheme, err := CreatePostgresOperatorScheme() - if err != nil { - return nil, err +func init() { + if err := scheme.AddToScheme(Scheme); err != nil { + panic(err) } - - options := manager.Options{ - Namespace: namespace, // if empty then watching all namespaces - SyncPeriod: &refreshInterval, - Scheme: pgoScheme, + if err := v1beta1.AddToScheme(Scheme); err != nil { + panic(err) } - if disableMetrics { - options.MetricsBindAddress = "0" + if err := volumesnapshotv1.AddToScheme(Scheme); err != nil { + panic(err) } - - // create controller runtime manager - mgr, err := manager.New(config, options) - if err != nil { - return nil, err - } - - return mgr, nil } -// GetConfig creates a *rest.Config for talking to a Kubernetes API server. +// GetConfig returns a Kubernetes client configuration from KUBECONFIG or the +// service account Kubernetes gives to pods. func GetConfig() (*rest.Config, error) { return config.GetConfig() } -// CreatePostgresOperatorScheme creates a scheme containing the resource types required by the -// PostgreSQL Operator. This includes any custom resource types specific to the PostgreSQL -// Operator, as well as any standard Kubernetes resource types. -func CreatePostgresOperatorScheme() (*runtime.Scheme, error) { +// NewManager returns a Manager that interacts with the Kubernetes API of config. +// When config is nil, it reads from KUBECONFIG or the local service account. +// When options.Scheme is nil, it uses the Scheme from this package. +func NewManager(config *rest.Config, options manager.Options) (manager.Manager, error) { + var m manager.Manager + var err error - // create a new scheme specifically for this manager - pgoScheme := runtime.NewScheme() + if config == nil { + config, err = GetConfig() + } - // add standard resource types to the scheme - if err := scheme.AddToScheme(pgoScheme); err != nil { - return nil, err + if options.Scheme == nil { + options.Scheme = Scheme } - // add custom resource types to the default scheme - if err := v1beta1.AddToScheme(pgoScheme); err != nil { - return nil, err + if err == nil { + m, err = manager.New(config, options) } - return pgoScheme, nil + return m, err } + +// SetLogger assigns the default Logger used by [sigs.k8s.io/controller-runtime]. +func SetLogger(logger logging.Logger) { log.SetLogger(logger) } + +// SignalHandler returns a Context that is canceled on SIGINT or SIGTERM. +func SignalHandler() context.Context { return signals.SetupSignalHandler() } diff --git a/internal/controller/runtime/ticker.go b/internal/controller/runtime/ticker.go new file mode 100644 index 0000000000..830179eafc --- /dev/null +++ b/internal/controller/runtime/ticker.go @@ -0,0 +1,70 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
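// A minimal sketch, not part of the patch, of starting a Manager with the
// helpers above. The empty Options value and the panics are illustrative
// assumptions; a real entrypoint would configure Options and handle errors.
package main

import (
	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
)

func main() {
	// A nil config means KUBECONFIG or the in-cluster service account, and a
	// nil Options.Scheme falls back to the package-level Scheme.
	manager, err := runtime.NewManager(nil, runtime.Options{})
	if err != nil {
		panic(err)
	}

	// Run controllers until SIGINT or SIGTERM.
	if err := manager.Start(runtime.SignalHandler()); err != nil {
		panic(err)
	}
}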
+// +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "context" + "time" + + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +type ticker struct { + time.Duration + event.GenericEvent + Handler handler.EventHandler + Immediate bool +} + +// NewTicker returns a Source that emits e every d. +func NewTicker(d time.Duration, e event.GenericEvent, + h handler.EventHandler) source.Source { + return &ticker{Duration: d, GenericEvent: e, Handler: h} +} + +// NewTickerImmediate returns a Source that emits e at start and every d. +func NewTickerImmediate(d time.Duration, e event.GenericEvent, + h handler.EventHandler) source.Source { + return &ticker{Duration: d, GenericEvent: e, Handler: h, Immediate: true} +} + +func (t ticker) String() string { return "every " + t.Duration.String() } + +// Start is called by controller-runtime Controller and returns quickly. +// It cleans up when ctx is cancelled. +func (t ticker) Start( + ctx context.Context, q workqueue.RateLimitingInterface, +) error { + ticker := time.NewTicker(t.Duration) + + // Pass t.GenericEvent to h when it is not filtered out by p. + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/source/internal#EventHandler + emit := func() { + t.Handler.Generic(ctx, t.GenericEvent, q) + } + + if t.Immediate { + emit() + } + + // Repeat until ctx is cancelled. + go func() { + defer ticker.Stop() + + for { + select { + case <-ticker.C: + emit() + case <-ctx.Done(): + return + } + } + }() + + return nil +} diff --git a/internal/controller/runtime/ticker_test.go b/internal/controller/runtime/ticker_test.go new file mode 100644 index 0000000000..49cecd79d7 --- /dev/null +++ b/internal/controller/runtime/ticker_test.go @@ -0,0 +1,70 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "context" + "testing" + "time" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" +) + +func TestTickerString(t *testing.T) { + assert.Equal(t, ticker{Duration: time.Millisecond}.String(), "every 1ms") + assert.Equal(t, ticker{Duration: 10 * time.Second}.String(), "every 10s") + assert.Equal(t, ticker{Duration: time.Hour}.String(), "every 1h0m0s") +} + +func TestTicker(t *testing.T) { + t.Parallel() + + var called []event.GenericEvent + expected := event.GenericEvent{Object: new(corev1.ConfigMap)} + + tq := workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()) + th := handler.Funcs{GenericFunc: func(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) { + called = append(called, e) + + assert.Equal(t, q, tq, "should be called with the queue passed in Start") + }} + + t.Run("NotImmediate", func(t *testing.T) { + called = nil + + ticker := NewTicker(100*time.Millisecond, expected, th) + ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) + t.Cleanup(cancel) + + // Start the ticker and wait for the deadline to pass. 
+ assert.NilError(t, ticker.Start(ctx, tq)) + <-ctx.Done() + + assert.Equal(t, len(called), 2) + assert.Equal(t, called[0], expected, "expected at 100ms") + assert.Equal(t, called[1], expected, "expected at 200ms") + }) + + t.Run("Immediate", func(t *testing.T) { + called = nil + + ticker := NewTickerImmediate(100*time.Millisecond, expected, th) + ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) + t.Cleanup(cancel) + + // Start the ticker and wait for the deadline to pass. + assert.NilError(t, ticker.Start(ctx, tq)) + <-ctx.Done() + + assert.Assert(t, len(called) > 2) + assert.Equal(t, called[0], expected, "expected at 0ms") + assert.Equal(t, called[1], expected, "expected at 100ms") + assert.Equal(t, called[2], expected, "expected at 200ms") + }) +} diff --git a/internal/controller/standalone_pgadmin/apply.go b/internal/controller/standalone_pgadmin/apply.go new file mode 100644 index 0000000000..0eaa613df8 --- /dev/null +++ b/internal/controller/standalone_pgadmin/apply.go @@ -0,0 +1,47 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + "reflect" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// patch sends patch to object's endpoint in the Kubernetes API and updates +// object with any returned content. The fieldManager is set to r.Owner, but +// can be overridden in options. +// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers +// +// TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object. +func (r *PGAdminReconciler) patch( + ctx context.Context, object client.Object, + patch client.Patch, options ...client.PatchOption, +) error { + options = append([]client.PatchOption{r.Owner}, options...) + return r.Client.Patch(ctx, object, patch, options...) +} + +// apply sends an apply patch to object's endpoint in the Kubernetes API and +// updates object with any returned content. The fieldManager is set to +// r.Owner and the force parameter is true. +// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers +// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts +// +// TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object. +func (r *PGAdminReconciler) apply(ctx context.Context, object client.Object) error { + // Generate an apply-patch by comparing the object to its zero value. + zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() + data, err := client.MergeFrom(zero.(client.Object)).Data(object) + apply := client.RawPatch(client.Apply.Type(), data) + + // Send the apply-patch with force=true. + if err == nil { + err = r.patch(ctx, object, apply, client.ForceOwnership) + } + + return err +} diff --git a/internal/controller/standalone_pgadmin/config.go b/internal/controller/standalone_pgadmin/config.go new file mode 100644 index 0000000000..ddd080985b --- /dev/null +++ b/internal/controller/standalone_pgadmin/config.go @@ -0,0 +1,19 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. 
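// A minimal sketch, not part of the patch, of what the apply() helper above
// boils down to for a concrete type: serialize the object as a full
// apply-patch and send it with force ownership under one field manager. The
// applyConfigMap name and "example-owner" value are assumptions.
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func applyConfigMap(ctx context.Context, c client.Client, cm *corev1.ConfigMap) error {
	// Diff against an empty ConfigMap so every field of cm is included.
	data, err := client.MergeFrom(&corev1.ConfigMap{}).Data(cm)
	if err != nil {
		return err
	}
	apply := client.RawPatch(client.Apply.Type(), data)

	// Server-side apply: claim the fields for one manager and force conflicts.
	return c.Patch(ctx, cm, apply, client.FieldOwner("example-owner"), client.ForceOwnership)
}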
+// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +// Include configs here used by multiple files +const ( + // ConfigMap keys used also in mounting volume to pod + settingsConfigMapKey = "pgadmin-settings.json" + settingsClusterMapKey = "pgadmin-shared-clusters.json" + gunicornConfigKey = "gunicorn-config.json" + + // Port address used to define pod and service + pgAdminPort = 5050 + + // Directory for pgAdmin in container + pgAdminDir = "/usr/local/lib/python3.11/site-packages/pgadmin4" +) diff --git a/internal/controller/standalone_pgadmin/configmap.go b/internal/controller/standalone_pgadmin/configmap.go new file mode 100644 index 0000000000..d1ec39bf13 --- /dev/null +++ b/internal/controller/standalone_pgadmin/configmap.go @@ -0,0 +1,209 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "sort" + "strconv" + + corev1 "k8s.io/api/core/v1" + + "github.com/pkg/errors" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={get} +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={create,delete,patch} + +// reconcilePGAdminConfigMap writes the ConfigMap for pgAdmin. +func (r *PGAdminReconciler) reconcilePGAdminConfigMap( + ctx context.Context, pgadmin *v1beta1.PGAdmin, + clusters map[string]*v1beta1.PostgresClusterList, +) (*corev1.ConfigMap, error) { + configmap, err := configmap(pgadmin, clusters) + if err == nil { + err = errors.WithStack(r.setControllerReference(pgadmin, configmap)) + } + if err == nil { + err = errors.WithStack(r.apply(ctx, configmap)) + } + + return configmap, err +} + +// configmap returns a v1.ConfigMap for pgAdmin. +func configmap(pgadmin *v1beta1.PGAdmin, + clusters map[string]*v1beta1.PostgresClusterList, +) (*corev1.ConfigMap, error) { + configmap := &corev1.ConfigMap{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + configmap.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + + configmap.Annotations = pgadmin.Spec.Metadata.GetAnnotationsOrNil() + configmap.Labels = naming.Merge( + pgadmin.Spec.Metadata.GetLabelsOrNil(), + naming.StandalonePGAdminLabels(pgadmin.Name)) + + // TODO(tjmoore4): Populate configuration details. + initialize.Map(&configmap.Data) + configSettings, err := generateConfig(pgadmin) + if err == nil { + configmap.Data[settingsConfigMapKey] = configSettings + } + + clusterSettings, err := generateClusterConfig(clusters) + if err == nil { + configmap.Data[settingsClusterMapKey] = clusterSettings + } + + gunicornSettings, err := generateGunicornConfig(pgadmin) + if err == nil { + configmap.Data[gunicornConfigKey] = gunicornSettings + } + + return configmap, err +} + +// generateConfig generates the config settings for the pgAdmin +func generateConfig(pgadmin *v1beta1.PGAdmin) (string, error) { + settings := map[string]any{ + // Bind to all IPv4 addresses by default. "0.0.0.0" here represents INADDR_ANY. + // - https://flask.palletsprojects.com/en/2.2.x/api/#flask.Flask.run + // - https://flask.palletsprojects.com/en/2.3.x/api/#flask.Flask.run + "DEFAULT_SERVER": "0.0.0.0", + } + + // Copy any specified settings over the defaults. 
+ for k, v := range pgadmin.Spec.Config.Settings { + settings[k] = v + } + + // Write mandatory settings over any specified ones. + // SERVER_MODE must always be enabled when running on a webserver. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-7_7/web/config.py#L110 + settings["SERVER_MODE"] = true + settings["UPGRADE_CHECK_ENABLED"] = false + settings["UPGRADE_CHECK_URL"] = "" + settings["UPGRADE_CHECK_KEY"] = "" + + // To avoid spurious reconciles, the following value must not change when + // the spec does not change. [json.Encoder] and [json.Marshal] do this by + // emitting map keys in sorted order. Indent so the value is not rendered + // as one long line by `kubectl`. + buffer := new(bytes.Buffer) + encoder := json.NewEncoder(buffer) + encoder.SetEscapeHTML(false) + encoder.SetIndent("", " ") + err := encoder.Encode(settings) + + return buffer.String(), err +} + +// generateClusterConfig generates the settings for the servers registered in pgAdmin. +// pgAdmin's `setup.py --load-server` function ingests this list of servers as JSON, +// in the following form: +// +// { +// "Servers": { +// "1": { +// "Name": "Minimally Defined Server", +// "Group": "Server Group 1", +// "Port": 5432, +// "Username": "postgres", +// "Host": "localhost", +// "SSLMode": "prefer", +// "MaintenanceDB": "postgres" +// }, +// "2": { ... } +// } +// } +func generateClusterConfig( + clusters map[string]*v1beta1.PostgresClusterList, +) (string, error) { + // To avoid spurious reconciles, the following value must not change when + // the spec does not change. [json.Encoder] and [json.Marshal] do this by + // emitting map keys in sorted order. Indent so the value is not rendered + // as one long line by `kubectl`. + buffer := new(bytes.Buffer) + encoder := json.NewEncoder(buffer) + encoder.SetEscapeHTML(false) + encoder.SetIndent("", " ") + + // To avoid spurious reconciles, we want to keep the `clusters` order consistent + // which we can do by + // a) sorting the ServerGroup name used as a key; and + // b) sorting the clusters by name; + keys := []string{} + for key := range clusters { + keys = append(keys, key) + } + sort.Strings(keys) + + clusterServers := map[int]any{} + for _, serverGroupName := range keys { + sort.Slice(clusters[serverGroupName].Items, + func(i, j int) bool { + return clusters[serverGroupName].Items[i].Name < clusters[serverGroupName].Items[j].Name + }) + for _, cluster := range clusters[serverGroupName].Items { + object := map[string]any{ + "Name": cluster.Name, + "Group": serverGroupName, + "Host": fmt.Sprintf("%s-primary.%s.svc", cluster.Name, cluster.Namespace), + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": cluster.Name, + // `SSLMode` and some other settings may need to be set by the user in the future + "SSLMode": "prefer", + "Shared": true, + } + clusterServers[len(clusterServers)+1] = object + } + } + servers := map[string]any{ + "Servers": clusterServers, + } + err := encoder.Encode(servers) + return buffer.String(), err +} + +// generateGunicornConfig generates the config settings for the gunicorn server +// - https://docs.gunicorn.org/en/latest/settings.html +func generateGunicornConfig(pgadmin *v1beta1.PGAdmin) (string, error) { + settings := map[string]any{ + // Bind to all IPv4 addresses and set 25 threads by default. 
+ // - https://docs.gunicorn.org/en/latest/settings.html#bind + // - https://docs.gunicorn.org/en/latest/settings.html#threads + "bind": "0.0.0.0:" + strconv.Itoa(pgAdminPort), + "threads": 25, + } + + // Copy any specified settings over the defaults. + for k, v := range pgadmin.Spec.Config.Gunicorn { + settings[k] = v + } + + // Write mandatory settings over any specified ones. + // - https://docs.gunicorn.org/en/latest/settings.html#workers + settings["workers"] = 1 + + // To avoid spurious reconciles, the following value must not change when + // the spec does not change. [json.Encoder] and [json.Marshal] do this by + // emitting map keys in sorted order. Indent so the value is not rendered + // as one long line by `kubectl`. + buffer := new(bytes.Buffer) + encoder := json.NewEncoder(buffer) + encoder.SetEscapeHTML(false) + encoder.SetIndent("", " ") + err := encoder.Encode(settings) + + return buffer.String(), err +} diff --git a/internal/controller/standalone_pgadmin/configmap_test.go b/internal/controller/standalone_pgadmin/configmap_test.go new file mode 100644 index 0000000000..5a844e520c --- /dev/null +++ b/internal/controller/standalone_pgadmin/configmap_test.go @@ -0,0 +1,293 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "testing" + + "gotest.tools/v3/assert" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestGenerateConfig(t *testing.T) { + require.ParallelCapacity(t, 0) + + t.Run("Default", func(t *testing.T) { + pgadmin := new(v1beta1.PGAdmin) + result, err := generateConfig(pgadmin) + + assert.NilError(t, err) + assert.Equal(t, result, `{ + "DEFAULT_SERVER": "0.0.0.0", + "SERVER_MODE": true, + "UPGRADE_CHECK_ENABLED": false, + "UPGRADE_CHECK_KEY": "", + "UPGRADE_CHECK_URL": "" +}`+"\n") + }) + + t.Run("Mandatory", func(t *testing.T) { + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Spec.Config.Settings = map[string]any{ + "SERVER_MODE": false, + "UPGRADE_CHECK_ENABLED": true, + } + result, err := generateConfig(pgadmin) + + assert.NilError(t, err) + assert.Equal(t, result, `{ + "DEFAULT_SERVER": "0.0.0.0", + "SERVER_MODE": true, + "UPGRADE_CHECK_ENABLED": false, + "UPGRADE_CHECK_KEY": "", + "UPGRADE_CHECK_URL": "" +}`+"\n") + }) + + t.Run("Specified", func(t *testing.T) { + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Spec.Config.Settings = map[string]any{ + "ALLOWED_HOSTS": []any{"225.0.0.0/8", "226.0.0.0/7", "228.0.0.0/6"}, + "DEFAULT_SERVER": "::", + } + result, err := generateConfig(pgadmin) + + assert.NilError(t, err) + assert.Equal(t, result, `{ + "ALLOWED_HOSTS": [ + "225.0.0.0/8", + "226.0.0.0/7", + "228.0.0.0/6" + ], + "DEFAULT_SERVER": "::", + "SERVER_MODE": true, + "UPGRADE_CHECK_ENABLED": false, + "UPGRADE_CHECK_KEY": "", + "UPGRADE_CHECK_URL": "" +}`+"\n") + }) +} + +func TestGenerateClusterConfig(t *testing.T) { + require.ParallelCapacity(t, 0) + + cluster := testCluster() + cluster.Namespace = "postgres-operator" + clusterList := &v1beta1.PostgresClusterList{ + Items: []v1beta1.PostgresCluster{*cluster, *cluster}, + } + clusters := map[string]*v1beta1.PostgresClusterList{ + "shared": clusterList, + "test": clusterList, + "hello": clusterList, + } + + expectedString := `{ + "Servers": { + "1": { + "Group": "hello", + "Host": "hippo-primary.postgres-operator.svc", + 
"MaintenanceDB": "postgres", + "Name": "hippo", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "hippo" + }, + "2": { + "Group": "hello", + "Host": "hippo-primary.postgres-operator.svc", + "MaintenanceDB": "postgres", + "Name": "hippo", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "hippo" + }, + "3": { + "Group": "shared", + "Host": "hippo-primary.postgres-operator.svc", + "MaintenanceDB": "postgres", + "Name": "hippo", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "hippo" + }, + "4": { + "Group": "shared", + "Host": "hippo-primary.postgres-operator.svc", + "MaintenanceDB": "postgres", + "Name": "hippo", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "hippo" + }, + "5": { + "Group": "test", + "Host": "hippo-primary.postgres-operator.svc", + "MaintenanceDB": "postgres", + "Name": "hippo", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "hippo" + }, + "6": { + "Group": "test", + "Host": "hippo-primary.postgres-operator.svc", + "MaintenanceDB": "postgres", + "Name": "hippo", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "hippo" + } + } +} +` + actualString, err := generateClusterConfig(clusters) + assert.NilError(t, err) + assert.Equal(t, actualString, expectedString) +} + +func TestGeneratePGAdminConfigMap(t *testing.T) { + require.ParallelCapacity(t, 0) + + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Namespace = "some-ns" + pgadmin.Name = "pg1" + clusters := map[string]*v1beta1.PostgresClusterList{} + t.Run("Data,ObjectMeta,TypeMeta", func(t *testing.T) { + pgadmin := pgadmin.DeepCopy() + + configmap, err := configmap(pgadmin, clusters) + + assert.NilError(t, err) + assert.Assert(t, cmp.MarshalMatches(configmap.TypeMeta, ` +apiVersion: v1 +kind: ConfigMap + `)) + assert.Assert(t, cmp.MarshalMatches(configmap.ObjectMeta, ` +creationTimestamp: null +labels: + postgres-operator.crunchydata.com/pgadmin: pg1 + postgres-operator.crunchydata.com/role: pgadmin +name: pgadmin- +namespace: some-ns + `)) + + assert.Assert(t, len(configmap.Data) > 0, "expected some configuration") + }) + + t.Run("Annotations,Labels", func(t *testing.T) { + pgadmin := pgadmin.DeepCopy() + pgadmin.Spec.Metadata = &v1beta1.Metadata{ + Annotations: map[string]string{"a": "v1", "b": "v2"}, + Labels: map[string]string{"c": "v3", "d": "v4"}, + } + + configmap, err := configmap(pgadmin, clusters) + + assert.NilError(t, err) + // Annotations present in the metadata. + assert.DeepEqual(t, configmap.ObjectMeta.Annotations, map[string]string{ + "a": "v1", "b": "v2", + }) + + // Labels present in the metadata. 
+ assert.DeepEqual(t, configmap.ObjectMeta.Labels, map[string]string{ + "c": "v3", "d": "v4", + "postgres-operator.crunchydata.com/pgadmin": "pg1", + "postgres-operator.crunchydata.com/role": "pgadmin", + }) + }) +} + +func TestGenerateGunicornConfig(t *testing.T) { + require.ParallelCapacity(t, 0) + + t.Run("Default", func(t *testing.T) { + pgAdmin := &v1beta1.PGAdmin{} + pgAdmin.Name = "test" + pgAdmin.Namespace = "postgres-operator" + + expectedString := `{ + "bind": "0.0.0.0:5050", + "threads": 25, + "workers": 1 +} +` + actualString, err := generateGunicornConfig(pgAdmin) + assert.NilError(t, err) + assert.Equal(t, actualString, expectedString) + }) + + t.Run("Add Settings", func(t *testing.T) { + pgAdmin := &v1beta1.PGAdmin{} + pgAdmin.Name = "test" + pgAdmin.Namespace = "postgres-operator" + pgAdmin.Spec.Config.Gunicorn = map[string]any{ + "keyfile": "/path/to/keyfile", + "certfile": "/path/to/certfile", + } + + expectedString := `{ + "bind": "0.0.0.0:5050", + "certfile": "/path/to/certfile", + "keyfile": "/path/to/keyfile", + "threads": 25, + "workers": 1 +} +` + actualString, err := generateGunicornConfig(pgAdmin) + assert.NilError(t, err) + assert.Equal(t, actualString, expectedString) + }) + + t.Run("Update Defaults", func(t *testing.T) { + pgAdmin := &v1beta1.PGAdmin{} + pgAdmin.Name = "test" + pgAdmin.Namespace = "postgres-operator" + pgAdmin.Spec.Config.Gunicorn = map[string]any{ + "bind": "127.0.0.1:5051", + "threads": 30, + } + + expectedString := `{ + "bind": "127.0.0.1:5051", + "threads": 30, + "workers": 1 +} +` + actualString, err := generateGunicornConfig(pgAdmin) + assert.NilError(t, err) + assert.Equal(t, actualString, expectedString) + }) + + t.Run("Update Mandatory", func(t *testing.T) { + pgAdmin := &v1beta1.PGAdmin{} + pgAdmin.Name = "test" + pgAdmin.Namespace = "postgres-operator" + pgAdmin.Spec.Config.Gunicorn = map[string]any{ + "workers": "100", + } + + expectedString := `{ + "bind": "0.0.0.0:5050", + "threads": 25, + "workers": 1 +} +` + actualString, err := generateGunicornConfig(pgAdmin) + assert.NilError(t, err) + assert.Equal(t, actualString, expectedString) + }) + +} diff --git a/internal/controller/standalone_pgadmin/controller.go b/internal/controller/standalone_pgadmin/controller.go new file mode 100644 index 0000000000..81d5fc2d40 --- /dev/null +++ b/internal/controller/standalone_pgadmin/controller.go @@ -0,0 +1,178 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. 
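// A small self-contained illustration, not part of the patch, of why the
// generated settings files asserted above are stable across reconciles:
// encoding/json writes map keys in sorted order, so identical settings always
// serialize to identical bytes. The sample settings map is an assumption.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	settings := map[string]any{"workers": 1, "bind": "0.0.0.0:5050", "threads": 25}

	buffer := new(bytes.Buffer)
	encoder := json.NewEncoder(buffer)
	encoder.SetEscapeHTML(false)
	encoder.SetIndent("", "  ")
	_ = encoder.Encode(settings)

	// Keys come out alphabetically: bind, threads, workers.
	fmt.Print(buffer.String())
}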
+// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + "io" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + controllerruntime "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// PGAdminReconciler reconciles a PGAdmin object +type PGAdminReconciler struct { + client.Client + Owner client.FieldOwner + PodExec func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error + Recorder record.EventRecorder + IsOpenShift bool +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list,watch} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={list,watch} +//+kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={list,watch} +//+kubebuilder:rbac:groups="",resources="secrets",verbs={list,watch} +//+kubebuilder:rbac:groups="",resources="configmaps",verbs={list,watch} +//+kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={list,watch} + +// SetupWithManager sets up the controller with the Manager. +// +// TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object. +func (r *PGAdminReconciler) SetupWithManager(mgr ctrl.Manager) error { + if r.PodExec == nil { + var err error + r.PodExec, err = controllerruntime.NewPodExecutor(mgr.GetConfig()) + if err != nil { + return err + } + } + + return ctrl.NewControllerManagedBy(mgr). + For(&v1beta1.PGAdmin{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.PersistentVolumeClaim{}). + Owns(&corev1.Secret{}). + Owns(&appsv1.StatefulSet{}). + Owns(&corev1.Service{}). + Watches( + v1beta1.NewPostgresCluster(), + r.watchPostgresClusters(), + ). + Watches( + &corev1.Secret{}, + r.watchForRelatedSecret(), + ). + Complete(r) +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={get} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins/status",verbs={patch} + +// Reconcile which aims to move the current state of the pgAdmin closer to the +// desired state described in a [v1beta1.PGAdmin] identified by request. +func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + var err error + log := logging.FromContext(ctx) + + pgAdmin := &v1beta1.PGAdmin{} + if err := r.Get(ctx, req.NamespacedName, pgAdmin); err != nil { + // NotFound cannot be fixed by requeuing so ignore it. During background + // deletion, we receive delete events from pgadmin's dependents after + // pgadmin is deleted. + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Write any changes to the pgadmin status on the way out. 
+ before := pgAdmin.DeepCopy() + defer func() { + if !equality.Semantic.DeepEqual(before.Status, pgAdmin.Status) { + statusErr := r.Status().Patch(ctx, pgAdmin, client.MergeFrom(before), r.Owner) + if statusErr != nil { + log.Error(statusErr, "Patching PGAdmin status") + } + if err == nil { + err = statusErr + } + } + }() + + log.V(1).Info("Reconciling pgAdmin") + + // Set defaults if unset + pgAdmin.Default() + + var ( + configmap *corev1.ConfigMap + dataVolume *corev1.PersistentVolumeClaim + clusters map[string]*v1beta1.PostgresClusterList + _ *corev1.Service + ) + + if err == nil { + clusters, err = r.getClustersForPGAdmin(ctx, pgAdmin) + } + if err == nil { + configmap, err = r.reconcilePGAdminConfigMap(ctx, pgAdmin, clusters) + } + if err == nil { + dataVolume, err = r.reconcilePGAdminDataVolume(ctx, pgAdmin) + } + if err == nil { + err = r.reconcilePGAdminService(ctx, pgAdmin) + } + if err == nil { + err = r.reconcilePGAdminStatefulSet(ctx, pgAdmin, configmap, dataVolume) + } + if err == nil { + err = r.reconcilePGAdminUsers(ctx, pgAdmin) + } + + if err == nil { + // at this point everything reconciled successfully, and we can update the + // observedGeneration + pgAdmin.Status.ObservedGeneration = pgAdmin.GetGeneration() + log.V(1).Info("Reconciled pgAdmin") + } + + return ctrl.Result{}, err +} + +// The owner reference created by controllerutil.SetControllerReference blocks +// deletion. The OwnerReferencesPermissionEnforcement plugin requires that the +// creator of such a reference have either "delete" permission on the owner or +// "update" permission on the owner's "finalizers" subresource. +// - https://docs.k8s.io/reference/access-authn-authz/admission-controllers/ +// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins/finalizers",verbs={update} + +// setControllerReference sets owner as a Controller OwnerReference on controlled. +// Only one OwnerReference can be a controller, so it returns an error if another +// is already set. +// +// TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object. +func (r *PGAdminReconciler) setControllerReference( + owner *v1beta1.PGAdmin, controlled client.Object, +) error { + return controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme()) +} + +// deleteControlled safely deletes object when it is controlled by pgAdmin. +func (r *PGAdminReconciler) deleteControlled( + ctx context.Context, pgadmin *v1beta1.PGAdmin, object client.Object, +) error { + if metav1.IsControlledBy(object, pgadmin) { + uid := object.GetUID() + version := object.GetResourceVersion() + exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} + + return r.Client.Delete(ctx, object, exactly) + } + + return nil +} diff --git a/internal/controller/standalone_pgadmin/controller_test.go b/internal/controller/standalone_pgadmin/controller_test.go new file mode 100644 index 0000000000..b0fe17cbe6 --- /dev/null +++ b/internal/controller/standalone_pgadmin/controller_test.go @@ -0,0 +1,75 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + "strings" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestDeleteControlled(t *testing.T) { + ctx := context.Background() + cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + ns := setupNamespace(t, cc) + reconciler := PGAdminReconciler{Client: cc} + + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Namespace = ns.Name + pgadmin.Name = strings.ToLower(t.Name()) + assert.NilError(t, cc.Create(ctx, pgadmin)) + + t.Run("NoOwnership", func(t *testing.T) { + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "solo" + + assert.NilError(t, cc.Create(ctx, secret)) + + // No-op when there's no ownership + assert.NilError(t, reconciler.deleteControlled(ctx, pgadmin, secret)) + assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret)) + }) + + // We aren't currently using setOwnerReference in the pgAdmin controller + // If that changes we can uncomment this code + // t.Run("Owned", func(t *testing.T) { + // secret := &corev1.Secret{} + // secret.Namespace = ns.Name + // secret.Name = "owned" + + // assert.NilError(t, reconciler.setOwnerReference(pgadmin, secret)) + // assert.NilError(t, cc.Create(ctx, secret)) + + // // No-op when not controlled by cluster. + // assert.NilError(t, reconciler.deleteControlled(ctx, pgadmin, secret)) + // assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret)) + // }) + + t.Run("Controlled", func(t *testing.T) { + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "controlled" + + assert.NilError(t, reconciler.setControllerReference(pgadmin, secret)) + assert.NilError(t, cc.Create(ctx, secret)) + + // Deletes when controlled by cluster. + assert.NilError(t, reconciler.deleteControlled(ctx, pgadmin, secret)) + + err := cc.Get(ctx, client.ObjectKeyFromObject(secret), secret) + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %#v", err) + }) +} diff --git a/internal/controller/standalone_pgadmin/helpers_test.go b/internal/controller/standalone_pgadmin/helpers_test.go new file mode 100644 index 0000000000..9096edb5a1 --- /dev/null +++ b/internal/controller/standalone_pgadmin/helpers_test.go @@ -0,0 +1,76 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + "os" + "strconv" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/crunchydata/postgres-operator/internal/testing/require" +) + +// Scale extends d according to PGO_TEST_TIMEOUT_SCALE. +// +// TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object. 
+var Scale = func(d time.Duration) time.Duration { return d } + +func init() { + setting := os.Getenv("PGO_TEST_TIMEOUT_SCALE") + factor, _ := strconv.ParseFloat(setting, 64) + + if setting != "" { + if factor <= 0 { + panic("PGO_TEST_TIMEOUT_SCALE must be a fractional number greater than zero") + } + + Scale = func(d time.Duration) time.Duration { + return time.Duration(factor * float64(d)) + } + } +} + +// setupKubernetes starts or connects to a Kubernetes API and returns a client +// that uses it. See [require.Kubernetes] for more details. +func setupKubernetes(t testing.TB) client.Client { + t.Helper() + + // Start and/or connect to a Kubernetes API, or Skip when that's not configured. + cc := require.Kubernetes(t) + + // Log the status of any test namespaces after this test fails. + t.Cleanup(func() { + if t.Failed() { + var namespaces corev1.NamespaceList + _ = cc.List(context.Background(), &namespaces, client.HasLabels{"postgres-operator-test"}) + + type shaped map[string]corev1.NamespaceStatus + result := make([]shaped, len(namespaces.Items)) + + for i, ns := range namespaces.Items { + result[i] = shaped{ns.Labels["postgres-operator-test"]: ns.Status} + } + + formatted, _ := yaml.Marshal(result) + t.Logf("Test Namespaces:\n%s", formatted) + } + }) + + return cc +} + +// setupNamespace creates a random namespace that will be deleted by t.Cleanup. +// +// Deprecated: Use [require.Namespace] instead. +func setupNamespace(t testing.TB, cc client.Client) *corev1.Namespace { + t.Helper() + return require.Namespace(t, cc) +} diff --git a/internal/controller/standalone_pgadmin/helpers_unit_test.go b/internal/controller/standalone_pgadmin/helpers_unit_test.go new file mode 100644 index 0000000000..63887385fc --- /dev/null +++ b/internal/controller/standalone_pgadmin/helpers_unit_test.go @@ -0,0 +1,76 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// TODO(benjaminjb): This file is duplicated test help functions +// that could probably be put into a separate test_helper package + +var ( + //TODO(tjmoore4): With the new RELATED_IMAGES defaulting behavior, tests could be refactored + // to reference those environment variables instead of hard coded image values + CrunchyPostgresHAImage = "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-13.6-1" + CrunchyPGBackRestImage = "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.38-0" + CrunchyPGBouncerImage = "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.16-2" +) + +func testCluster() *v1beta1.PostgresCluster { + // Defines a base cluster spec that can be used by tests to generate a + // cluster with an expected number of instances + cluster := v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hippo", + }, + Spec: v1beta1.PostgresClusterSpec{ + PostgresVersion: 13, + Image: CrunchyPostgresHAImage, + ImagePullSecrets: []corev1.LocalObjectReference{{ + Name: "myImagePullSecret"}, + }, + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + Name: "instance1", + Replicas: initialize.Int32(1), + DataVolumeClaimSpec: testVolumeClaimSpec(), + }}, + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Image: CrunchyPGBackRestImage, + Repos: []v1beta1.PGBackRestRepo{{ + Name: "repo1", + Volume: &v1beta1.RepoPVC{ + VolumeClaimSpec: testVolumeClaimSpec(), + }, + }}, + }, + }, + Proxy: &v1beta1.PostgresProxySpec{ + PGBouncer: &v1beta1.PGBouncerPodSpec{ + Image: CrunchyPGBouncerImage, + }, + }, + }, + } + return cluster.DeepCopy() +} + +func testVolumeClaimSpec() corev1.PersistentVolumeClaimSpec { + // Defines a volume claim spec that can be used to create instances + return corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + } +} diff --git a/internal/controller/standalone_pgadmin/pod.go b/internal/controller/standalone_pgadmin/pod.go new file mode 100644 index 0000000000..bbb39b9322 --- /dev/null +++ b/internal/controller/standalone_pgadmin/pod.go @@ -0,0 +1,462 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +const ( + configMountPath = "/etc/pgadmin/conf.d" + configFilePath = "~postgres-operator/" + settingsConfigMapKey + clusterFilePath = "~postgres-operator/" + settingsClusterMapKey + configDatabaseURIPath = "~postgres-operator/config-database-uri" + ldapFilePath = "~postgres-operator/ldap-bind-password" + gunicornConfigFilePath = "~postgres-operator/" + gunicornConfigKey + + // Nothing should be mounted to this location except the script our initContainer writes + scriptMountPath = "/etc/pgadmin" +) + +// pod populates a PodSpec with the container and volumes needed to run pgAdmin. +func pod( + inPGAdmin *v1beta1.PGAdmin, + inConfigMap *corev1.ConfigMap, + outPod *corev1.PodSpec, + pgAdminVolume *corev1.PersistentVolumeClaim, +) { + const ( + // config and data volume names + configVolumeName = "pgadmin-config" + dataVolumeName = "pgadmin-data" + logVolumeName = "pgadmin-log" + scriptVolumeName = "pgadmin-config-system" + tempVolumeName = "tmp" + ) + + // create the projected volume of config maps for use in + // 1. dynamic server discovery + // 2. adding the config variables during pgAdmin startup + configVolume := corev1.Volume{Name: configVolumeName} + configVolume.VolumeSource = corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: podConfigFiles(inConfigMap, *inPGAdmin), + }, + } + + // create the data volume for the persistent database + dataVolume := corev1.Volume{Name: dataVolumeName} + dataVolume.VolumeSource = corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pgAdminVolume.Name, + ReadOnly: false, + }, + } + + // create the temp volume for logs + logVolume := corev1.Volume{Name: logVolumeName} + logVolume.VolumeSource = corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + } + + // Volume used to write a custom config_system.py file in the initContainer + // which then loads the configs found in the `configVolume` + scriptVolume := corev1.Volume{Name: scriptVolumeName} + scriptVolume.VolumeSource = corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + + // When this volume is too small, the Pod will be evicted and recreated + // by the StatefulSet controller. + // - https://kubernetes.io/docs/concepts/storage/volumes/#emptydir + // NOTE: tmpfs blocks are PAGE_SIZE, usually 4KiB, and size rounds up. + SizeLimit: resource.NewQuantity(32<<10, resource.BinarySI), + }, + } + + // create a temp volume for restart pid/other/debugging use + // TODO: discuss tmp vol vs. 
persistent vol + tmpVolume := corev1.Volume{Name: tempVolumeName} + tmpVolume.VolumeSource = corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + } + + // pgadmin container + container := corev1.Container{ + Name: naming.ContainerPGAdmin, + Command: startupScript(inPGAdmin), + Image: config.StandalonePGAdminContainerImage(inPGAdmin), + ImagePullPolicy: inPGAdmin.Spec.ImagePullPolicy, + Resources: inPGAdmin.Spec.Resources, + SecurityContext: initialize.RestrictedSecurityContext(), + Ports: []corev1.ContainerPort{{ + Name: naming.PortPGAdmin, + ContainerPort: int32(pgAdminPort), + Protocol: corev1.ProtocolTCP, + }}, + Env: []corev1.EnvVar{ + { + Name: "PGADMIN_SETUP_EMAIL", + Value: fmt.Sprintf("admin@%s.%s.svc", inPGAdmin.Name, inPGAdmin.Namespace), + }, + // Setting the KRB5_CONFIG for kerberos + // - https://web.mit.edu/kerberos/krb5-current/doc/admin/conf_files/krb5_conf.html + { + Name: "KRB5_CONFIG", + Value: configMountPath + "/krb5.conf", + }, + // In testing it was determined that we need to set this env var for the replay cache + // otherwise it defaults to the read-only location `/var/tmp/` + // - https://web.mit.edu/kerberos/krb5-current/doc/basic/rcache_def.html#replay-cache-types + { + Name: "KRB5RCACHEDIR", + Value: "/tmp", + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: configVolumeName, + MountPath: configMountPath, + ReadOnly: true, + }, + { + Name: dataVolumeName, + MountPath: "/var/lib/pgadmin", + }, + { + Name: logVolumeName, + MountPath: "/var/log/pgadmin", + }, + { + Name: scriptVolumeName, + MountPath: scriptMountPath, + ReadOnly: true, + }, + { + Name: tempVolumeName, + MountPath: "/tmp", + }, + }, + } + + // Creating a readiness probe that will check that the pgAdmin `/login` + // endpoint is reachable at the specified port + readinessProbe := &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt32(pgAdminPort), + Path: "/login", + Scheme: corev1.URISchemeHTTP, + }, + }, + } + gunicornData := inConfigMap.Data[gunicornConfigKey] + // Check the configmap to see if we think TLS is enabled + // If so, update the readiness check scheme to HTTPS + if strings.Contains(gunicornData, "certfile") && strings.Contains(gunicornData, "keyfile") { + readinessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS + } + container.ReadinessProbe = readinessProbe + + startup := corev1.Container{ + Name: naming.ContainerPGAdminStartup, + Command: startupCommand(), + Image: container.Image, + ImagePullPolicy: container.ImagePullPolicy, + Resources: container.Resources, + SecurityContext: initialize.RestrictedSecurityContext(), + VolumeMounts: []corev1.VolumeMount{ + // Volume to write a custom `config_system.py` file to. + { + Name: scriptVolumeName, + MountPath: scriptMountPath, + ReadOnly: false, + }, + }, + } + + // add volumes and containers + outPod.Volumes = []corev1.Volume{ + configVolume, + dataVolume, + logVolume, + scriptVolume, + tmpVolume, + } + outPod.Containers = []corev1.Container{container} + outPod.InitContainers = []corev1.Container{startup} +} + +// podConfigFiles returns projections of pgAdmin's configuration files to +// include in the configuration volume. 
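// As an aside (paths taken from the constants above and the expected output in
// TestPodConfigFiles below): the operator's own ConfigMap is projected into the
// configuration volume as
//   /etc/pgadmin/conf.d/~postgres-operator/pgadmin-settings.json
//   /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json
//   /etc/pgadmin/conf.d/~postgres-operator/gunicorn-config.json
// with any user-supplied Spec.Config.Files projected ahead of them.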
+func podConfigFiles(configmap *corev1.ConfigMap, pgadmin v1beta1.PGAdmin) []corev1.VolumeProjection { + + config := append(append([]corev1.VolumeProjection{}, pgadmin.Spec.Config.Files...), + []corev1.VolumeProjection{ + { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configmap.Name, + }, + Items: []corev1.KeyToPath{ + { + Key: settingsConfigMapKey, + Path: configFilePath, + }, + { + Key: settingsClusterMapKey, + Path: clusterFilePath, + }, + { + Key: gunicornConfigKey, + Path: gunicornConfigFilePath, + }, + }, + }, + }, + }...) + + if pgadmin.Spec.Config.ConfigDatabaseURI != nil { + config = append(config, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: pgadmin.Spec.Config.ConfigDatabaseURI.LocalObjectReference, + Optional: pgadmin.Spec.Config.ConfigDatabaseURI.Optional, + Items: []corev1.KeyToPath{ + { + Key: pgadmin.Spec.Config.ConfigDatabaseURI.Key, + Path: configDatabaseURIPath, + }, + }, + }, + }) + } + + // To enable LDAP authentication for pgAdmin, various LDAP settings must be configured. + // While most of the required configuration can be set using the 'settings' + // feature on the spec (.Spec.UserInterface.PGAdmin.Config.Settings), those + // values are stored in a ConfigMap in plaintext. + // As a special case, here we mount a provided Secret containing the LDAP_BIND_PASSWORD + // for use with the other pgAdmin LDAP configuration. + // - https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html + // - https://www.pgadmin.org/docs/pgadmin4/development/enabling_ldap_authentication.html + if pgadmin.Spec.Config.LDAPBindPassword != nil { + config = append(config, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: pgadmin.Spec.Config.LDAPBindPassword.LocalObjectReference, + Optional: pgadmin.Spec.Config.LDAPBindPassword.Optional, + Items: []corev1.KeyToPath{ + { + Key: pgadmin.Spec.Config.LDAPBindPassword.Key, + Path: ldapFilePath, + }, + }, + }, + }) + } + + return config +} + +func startupScript(pgadmin *v1beta1.PGAdmin) []string { + // loadServerCommandV7 is a python command leveraging the pgadmin v7 setup.py script + // with the `--load-servers` flag to replace the servers registered to the admin user + // with the contents of the `settingsClusterMapKey` file + var loadServerCommandV7 = fmt.Sprintf(`python3 ${PGADMIN_DIR}/setup.py --load-servers %s/%s --user %s --replace`, + configMountPath, + clusterFilePath, + fmt.Sprintf("admin@%s.%s.svc", pgadmin.Name, pgadmin.Namespace)) + + // loadServerCommandV8 is a python command leveraging the pgadmin v8 setup.py script + // with the `load-servers` sub-command to replace the servers registered to the admin user + // with the contents of the `settingsClusterMapKey` file + var loadServerCommandV8 = fmt.Sprintf(`python3 ${PGADMIN_DIR}/setup.py load-servers %s/%s --user %s --replace`, + configMountPath, + clusterFilePath, + fmt.Sprintf("admin@%s.%s.svc", pgadmin.Name, pgadmin.Namespace)) + + // setupCommands (v8 requires the 'setup-db' sub-command) + var setupCommandV7 = "python3 ${PGADMIN_DIR}/setup.py" + var setupCommandV8 = setupCommandV7 + " setup-db" + + // startCommands (v8 image includes Gunicorn) + var startCommandV7 = "pgadmin4 &" + var startCommandV8 = "gunicorn -c /etc/pgadmin/gunicorn_config.py --chdir $PGADMIN_DIR pgAdmin4:app &" + + // This script sets up, starts pgadmin, and runs the appropriate `loadServerCommand` to register the discovered servers. 
+ // pgAdmin is hosted by Gunicorn and uses a config file. + // - https://www.pgadmin.org/docs/pgadmin4/development/server_deployment.html#standalone-gunicorn-configuration + // - https://docs.gunicorn.org/en/latest/configure.html + var startScript = fmt.Sprintf(` +export PGADMIN_SETUP_PASSWORD="$(date +%%s | sha256sum | base64 | head -c 32)" +PGADMIN_DIR=%s +APP_RELEASE=$(cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_RELEASE)") + +echo "Running pgAdmin4 Setup" +if [ $APP_RELEASE -eq 7 ]; then + %s +else + %s +fi + +echo "Starting pgAdmin4" +PGADMIN4_PIDFILE=/tmp/pgadmin4.pid +if [ $APP_RELEASE -eq 7 ]; then + %s +else + %s +fi +echo $! > $PGADMIN4_PIDFILE + +loadServerCommand() { + if [ $APP_RELEASE -eq 7 ]; then + %s + else + %s + fi +} +loadServerCommand +`, pgAdminDir, setupCommandV7, setupCommandV8, startCommandV7, startCommandV8, loadServerCommandV7, loadServerCommandV8) + + // Use a Bash loop to periodically check: + // 1. the mtime of the mounted configuration volume for shared/discovered servers. + // When it changes, reload the shared server configuration. + // 2. that the pgadmin process is still running on the saved proc id. + // When it isn't, we consider pgadmin stopped. + // Restart pgadmin and continue watching. + + // Coreutils `sleep` uses a lot of memory, so the following opens a file + // descriptor and uses the timeout of the builtin `read` to wait. That same + // descriptor gets closed and reopened to use the builtin `[ -nt` to check mtimes. + // - https://unix.stackexchange.com/a/407383 + var reloadScript = ` +exec {fd}<> <(:||:) +while read -r -t 5 -u "${fd}" ||:; do + if [[ "${cluster_file}" -nt "/proc/self/fd/${fd}" ]] && loadServerCommand + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded shared servers dated %y' "${cluster_file}" + fi + if [[ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ]] + then + if [[ $APP_RELEASE -eq 7 ]]; then + ` + startCommandV7 + ` + else + ` + startCommandV8 + ` + fi + echo $! > $PGADMIN4_PIDFILE + echo "Restarting pgAdmin4" + fi +done +` + + wrapper := `monitor() {` + startScript + reloadScript + `}; export cluster_file="$1"; export -f monitor; exec -a "$0" bash -ceu monitor` + + return []string{"bash", "-ceu", "--", wrapper, "pgadmin", fmt.Sprintf("%s/%s", configMountPath, clusterFilePath)} +} + +// startupCommand returns an entrypoint that prepares the filesystem for pgAdmin. +func startupCommand() []string { + // pgAdmin reads from the `/etc/pgadmin/config_system.py` file during startup + // after all other config files. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-7_7/docs/en_US/config_py.rst + // + // This command writes a script in `/etc/pgadmin/config_system.py` that reads from + // the `pgadmin-settings.json` file and the config-database-uri and/or + // `ldap-bind-password` files (if either exists) and sets those variables globally. + // That way those values are available as pgAdmin configurations when pgAdmin starts. + // + // Note: All pgAdmin settings are uppercase alphanumeric with underscores, so ignore + // any keys/names that are not. + // + // Note: set the pgAdmin LDAP_BIND_PASSWORD and CONFIG_DATABASE_URI settings from the + // Secrets last in order to overwrite the respective configurations set via ConfigMap JSON. 
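// For illustration only (hypothetical values): if pgadmin-settings.json held
//   {"SHOW_GRAVATAR_IMAGE": false, "debug": true}
// the generated config_system.py would set SHOW_GRAVATAR_IMAGE = False and
// ignore "debug", because only keys matching [A-Z_0-9]+ are promoted to
// globals. Likewise, a gunicorn-config.json of {"workers": 1} would become the
// Gunicorn setting workers = 1, since gunicorn_config.py only accepts keys
// matching [a-z_]+.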
+ + const ( + // ldapFilePath is the path for mounting the LDAP Bind Password + ldapPasswordAbsolutePath = configMountPath + "/" + ldapFilePath + + // configDatabaseURIPath is the path for mounting the database URI connection string + configDatabaseURIPathAbsolutePath = configMountPath + "/" + configDatabaseURIPath + + configSystem = ` +import glob, json, re, os +DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} +with open('` + configMountPath + `/` + configFilePath + `') as _f: + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) +if os.path.isfile('` + ldapPasswordAbsolutePath + `'): + with open('` + ldapPasswordAbsolutePath + `') as _f: + LDAP_BIND_PASSWORD = _f.read() +if os.path.isfile('` + configDatabaseURIPathAbsolutePath + `'): + with open('` + configDatabaseURIPathAbsolutePath + `') as _f: + CONFIG_DATABASE_URI = _f.read() +` + // gunicorn reads from the `/etc/pgadmin/gunicorn_config.py` file during startup + // after all other config files. + // - https://docs.gunicorn.org/en/latest/configure.html#configuration-file + // + // This command writes a script in `/etc/pgadmin/gunicorn_config.py` that reads + // from the `gunicorn-config.json` file and sets those variables globally. + // That way those values are available as settings when gunicorn starts. + // + // Note: All gunicorn settings are lowercase with underscores, so ignore + // any keys/names that are not. + gunicornConfig = ` +import json, re +with open('` + configMountPath + `/` + gunicornConfigFilePath + `') as _f: + _conf, _data = re.compile(r'[a-z_]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) +` + ) + + args := []string{strings.TrimLeft(configSystem, "\n"), strings.TrimLeft(gunicornConfig, "\n")} + + script := strings.Join([]string{ + // Use the initContainer to create this path to avoid the error noted here: + // - https://issue.k8s.io/121294 + `mkdir -p ` + configMountPath, + // Write the system and server configurations. + `echo "$1" > ` + scriptMountPath + `/config_system.py`, + `echo "$2" > ` + scriptMountPath + `/gunicorn_config.py`, + }, "\n") + + return append([]string{"bash", "-ceu", "--", script, "startup"}, args...) +} + +// podSecurityContext returns a v1.PodSecurityContext for pgadmin that can write +// to PersistentVolumes. +func podSecurityContext(r *PGAdminReconciler) *corev1.PodSecurityContext { + podSecurityContext := initialize.PodSecurityContext() + + // TODO (dsessler7): Add ability to add supplemental groups + + // OpenShift assigns a filesystem group based on a SecurityContextConstraint. + // Otherwise, set a filesystem group so pgAdmin can write to files + // regardless of the UID or GID of a container. + // - https://cloud.redhat.com/blog/a-guide-to-openshift-and-uids + // - https://docs.k8s.io/tasks/configure-pod-container/security-context/ + // - https://docs.openshift.com/container-platform/4.14/authentication/managing-security-context-constraints.html + if !r.IsOpenShift { + podSecurityContext.FSGroup = initialize.Int64(2) + } + + return podSecurityContext +} diff --git a/internal/controller/standalone_pgadmin/pod_test.go b/internal/controller/standalone_pgadmin/pod_test.go new file mode 100644 index 0000000000..19cee52882 --- /dev/null +++ b/internal/controller/standalone_pgadmin/pod_test.go @@ -0,0 +1,447 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestPod(t *testing.T) { + t.Parallel() + + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Name = "pgadmin" + pgadmin.Namespace = "postgres-operator" + config := new(corev1.ConfigMap) + testpod := new(corev1.PodSpec) + pvc := new(corev1.PersistentVolumeClaim) + + call := func() { pod(pgadmin, config, testpod, pvc) } + + t.Run("Defaults", func(t *testing.T) { + + call() + + assert.Assert(t, cmp.MarshalMatches(testpod, ` +containers: +- command: + - bash + - -ceu + - -- + - |- + monitor() { + export PGADMIN_SETUP_PASSWORD="$(date +%s | sha256sum | base64 | head -c 32)" + PGADMIN_DIR=/usr/local/lib/python3.11/site-packages/pgadmin4 + APP_RELEASE=$(cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_RELEASE)") + + echo "Running pgAdmin4 Setup" + if [ $APP_RELEASE -eq 7 ]; then + python3 ${PGADMIN_DIR}/setup.py + else + python3 ${PGADMIN_DIR}/setup.py setup-db + fi + + echo "Starting pgAdmin4" + PGADMIN4_PIDFILE=/tmp/pgadmin4.pid + if [ $APP_RELEASE -eq 7 ]; then + pgadmin4 & + else + gunicorn -c /etc/pgadmin/gunicorn_config.py --chdir $PGADMIN_DIR pgAdmin4:app & + fi + echo $! > $PGADMIN4_PIDFILE + + loadServerCommand() { + if [ $APP_RELEASE -eq 7 ]; then + python3 ${PGADMIN_DIR}/setup.py --load-servers /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json --user admin@pgadmin.postgres-operator.svc --replace + else + python3 ${PGADMIN_DIR}/setup.py load-servers /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json --user admin@pgadmin.postgres-operator.svc --replace + fi + } + loadServerCommand + + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${cluster_file}" -nt "/proc/self/fd/${fd}" ]] && loadServerCommand + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded shared servers dated %y' "${cluster_file}" + fi + if [[ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ]] + then + if [[ $APP_RELEASE -eq 7 ]]; then + pgadmin4 & + else + gunicorn -c /etc/pgadmin/gunicorn_config.py --chdir $PGADMIN_DIR pgAdmin4:app & + fi + echo $! 
> $PGADMIN4_PIDFILE + echo "Restarting pgAdmin4" + fi + done + }; export cluster_file="$1"; export -f monitor; exec -a "$0" bash -ceu monitor + - pgadmin + - /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json + env: + - name: PGADMIN_SETUP_EMAIL + value: admin@pgadmin.postgres-operator.svc + - name: KRB5_CONFIG + value: /etc/pgadmin/conf.d/krb5.conf + - name: KRB5RCACHEDIR + value: /tmp + name: pgadmin + ports: + - containerPort: 5050 + name: pgadmin + protocol: TCP + readinessProbe: + httpGet: + path: /login + port: 5050 + scheme: HTTP + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgadmin/conf.d + name: pgadmin-config + readOnly: true + - mountPath: /var/lib/pgadmin + name: pgadmin-data + - mountPath: /var/log/pgadmin + name: pgadmin-log + - mountPath: /etc/pgadmin + name: pgadmin-config-system + readOnly: true + - mountPath: /tmp + name: tmp +initContainers: +- command: + - bash + - -ceu + - -- + - |- + mkdir -p /etc/pgadmin/conf.d + echo "$1" > /etc/pgadmin/config_system.py + echo "$2" > /etc/pgadmin/gunicorn_config.py + - startup + - | + import glob, json, re, os + DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} + with open('/etc/pgadmin/conf.d/~postgres-operator/pgadmin-settings.json') as _f: + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) + if os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password'): + with open('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password') as _f: + LDAP_BIND_PASSWORD = _f.read() + if os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/config-database-uri'): + with open('/etc/pgadmin/conf.d/~postgres-operator/config-database-uri') as _f: + CONFIG_DATABASE_URI = _f.read() + - | + import json, re + with open('/etc/pgadmin/conf.d/~postgres-operator/gunicorn-config.json') as _f: + _conf, _data = re.compile(r'[a-z_]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) + name: pgadmin-startup + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgadmin + name: pgadmin-config-system +volumes: +- name: pgadmin-config + projected: + sources: + - configMap: + items: + - key: pgadmin-settings.json + path: ~postgres-operator/pgadmin-settings.json + - key: pgadmin-shared-clusters.json + path: ~postgres-operator/pgadmin-shared-clusters.json + - key: gunicorn-config.json + path: ~postgres-operator/gunicorn-config.json +- name: pgadmin-data + persistentVolumeClaim: + claimName: "" +- emptyDir: + medium: Memory + name: pgadmin-log +- emptyDir: + medium: Memory + sizeLimit: 32Ki + name: pgadmin-config-system +- emptyDir: + medium: Memory + name: tmp +`)) + + // No change when called again. 
+ before := testpod.DeepCopy() + call() + assert.DeepEqual(t, before, testpod) + }) + + t.Run("Customizations", func(t *testing.T) { + pgadmin.Spec.ImagePullPolicy = corev1.PullAlways + pgadmin.Spec.Image = initialize.String("new-image") + pgadmin.Spec.Resources.Requests = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + } + + call() + + assert.Assert(t, cmp.MarshalMatches(testpod, ` +containers: +- command: + - bash + - -ceu + - -- + - |- + monitor() { + export PGADMIN_SETUP_PASSWORD="$(date +%s | sha256sum | base64 | head -c 32)" + PGADMIN_DIR=/usr/local/lib/python3.11/site-packages/pgadmin4 + APP_RELEASE=$(cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_RELEASE)") + + echo "Running pgAdmin4 Setup" + if [ $APP_RELEASE -eq 7 ]; then + python3 ${PGADMIN_DIR}/setup.py + else + python3 ${PGADMIN_DIR}/setup.py setup-db + fi + + echo "Starting pgAdmin4" + PGADMIN4_PIDFILE=/tmp/pgadmin4.pid + if [ $APP_RELEASE -eq 7 ]; then + pgadmin4 & + else + gunicorn -c /etc/pgadmin/gunicorn_config.py --chdir $PGADMIN_DIR pgAdmin4:app & + fi + echo $! > $PGADMIN4_PIDFILE + + loadServerCommand() { + if [ $APP_RELEASE -eq 7 ]; then + python3 ${PGADMIN_DIR}/setup.py --load-servers /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json --user admin@pgadmin.postgres-operator.svc --replace + else + python3 ${PGADMIN_DIR}/setup.py load-servers /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json --user admin@pgadmin.postgres-operator.svc --replace + fi + } + loadServerCommand + + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${cluster_file}" -nt "/proc/self/fd/${fd}" ]] && loadServerCommand + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded shared servers dated %y' "${cluster_file}" + fi + if [[ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ]] + then + if [[ $APP_RELEASE -eq 7 ]]; then + pgadmin4 & + else + gunicorn -c /etc/pgadmin/gunicorn_config.py --chdir $PGADMIN_DIR pgAdmin4:app & + fi + echo $! 
> $PGADMIN4_PIDFILE + echo "Restarting pgAdmin4" + fi + done + }; export cluster_file="$1"; export -f monitor; exec -a "$0" bash -ceu monitor + - pgadmin + - /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json + env: + - name: PGADMIN_SETUP_EMAIL + value: admin@pgadmin.postgres-operator.svc + - name: KRB5_CONFIG + value: /etc/pgadmin/conf.d/krb5.conf + - name: KRB5RCACHEDIR + value: /tmp + image: new-image + imagePullPolicy: Always + name: pgadmin + ports: + - containerPort: 5050 + name: pgadmin + protocol: TCP + readinessProbe: + httpGet: + path: /login + port: 5050 + scheme: HTTP + resources: + requests: + cpu: 100m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgadmin/conf.d + name: pgadmin-config + readOnly: true + - mountPath: /var/lib/pgadmin + name: pgadmin-data + - mountPath: /var/log/pgadmin + name: pgadmin-log + - mountPath: /etc/pgadmin + name: pgadmin-config-system + readOnly: true + - mountPath: /tmp + name: tmp +initContainers: +- command: + - bash + - -ceu + - -- + - |- + mkdir -p /etc/pgadmin/conf.d + echo "$1" > /etc/pgadmin/config_system.py + echo "$2" > /etc/pgadmin/gunicorn_config.py + - startup + - | + import glob, json, re, os + DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} + with open('/etc/pgadmin/conf.d/~postgres-operator/pgadmin-settings.json') as _f: + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) + if os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password'): + with open('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password') as _f: + LDAP_BIND_PASSWORD = _f.read() + if os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/config-database-uri'): + with open('/etc/pgadmin/conf.d/~postgres-operator/config-database-uri') as _f: + CONFIG_DATABASE_URI = _f.read() + - | + import json, re + with open('/etc/pgadmin/conf.d/~postgres-operator/gunicorn-config.json') as _f: + _conf, _data = re.compile(r'[a-z_]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) + image: new-image + imagePullPolicy: Always + name: pgadmin-startup + resources: + requests: + cpu: 100m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgadmin + name: pgadmin-config-system +volumes: +- name: pgadmin-config + projected: + sources: + - configMap: + items: + - key: pgadmin-settings.json + path: ~postgres-operator/pgadmin-settings.json + - key: pgadmin-shared-clusters.json + path: ~postgres-operator/pgadmin-shared-clusters.json + - key: gunicorn-config.json + path: ~postgres-operator/gunicorn-config.json +- name: pgadmin-data + persistentVolumeClaim: + claimName: "" +- emptyDir: + medium: Memory + name: pgadmin-log +- emptyDir: + medium: Memory + sizeLimit: 32Ki + name: pgadmin-config-system +- emptyDir: + medium: Memory + name: tmp +`)) + }) +} + +func TestPodConfigFiles(t *testing.T) { + configmap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "some-cm"}} + + pgadmin := v1beta1.PGAdmin{ + Spec: v1beta1.PGAdminSpec{ + Config: v1beta1.StandalonePGAdminConfiguration{Files: 
[]corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-secret", + }}, + }, { + ConfigMap: &corev1.ConfigMapProjection{LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-cm", + }}, + }}}, + }, + } + + projections := podConfigFiles(configmap, pgadmin) + assert.Assert(t, cmp.MarshalMatches(projections, ` +- secret: + name: test-secret +- configMap: + name: test-cm +- configMap: + items: + - key: pgadmin-settings.json + path: ~postgres-operator/pgadmin-settings.json + - key: pgadmin-shared-clusters.json + path: ~postgres-operator/pgadmin-shared-clusters.json + - key: gunicorn-config.json + path: ~postgres-operator/gunicorn-config.json + name: some-cm + `)) +} + +func TestPodSecurityContext(t *testing.T) { + pgAdminReconciler := &PGAdminReconciler{} + + assert.Assert(t, cmp.MarshalMatches(podSecurityContext(pgAdminReconciler), ` +fsGroup: 2 +fsGroupChangePolicy: OnRootMismatch + `)) + + pgAdminReconciler.IsOpenShift = true + assert.Assert(t, cmp.MarshalMatches(podSecurityContext(pgAdminReconciler), + `fsGroupChangePolicy: OnRootMismatch`)) +} diff --git a/internal/controller/standalone_pgadmin/postgrescluster.go b/internal/controller/standalone_pgadmin/postgrescluster.go new file mode 100644 index 0000000000..5327b8ae70 --- /dev/null +++ b/internal/controller/standalone_pgadmin/postgrescluster.go @@ -0,0 +1,91 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list} + +// findPGAdminsForPostgresCluster returns PGAdmins that target a given cluster. +func (r *PGAdminReconciler) findPGAdminsForPostgresCluster( + ctx context.Context, cluster client.Object, +) []*v1beta1.PGAdmin { + var ( + matching []*v1beta1.PGAdmin + pgadmins v1beta1.PGAdminList + ) + + // NOTE: If this becomes slow due to a large number of pgadmins in a single + // namespace, we can configure the [ctrl.Manager] field indexer and pass a + // [fields.Selector] here. 
+ // - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html + if r.List(ctx, &pgadmins, &client.ListOptions{ + Namespace: cluster.GetNamespace(), + }) == nil { + for i := range pgadmins.Items { + for _, serverGroup := range pgadmins.Items[i].Spec.ServerGroups { + if serverGroup.PostgresClusterName == cluster.GetName() { + matching = append(matching, &pgadmins.Items[i]) + continue + } + if selector, err := naming.AsSelector(serverGroup.PostgresClusterSelector); err == nil { + if selector.Matches(labels.Set(cluster.GetLabels())) { + matching = append(matching, &pgadmins.Items[i]) + } + } + } + } + } + return matching +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={list,watch} + +// getClustersForPGAdmin returns clusters managed by the given pgAdmin +func (r *PGAdminReconciler) getClustersForPGAdmin( + ctx context.Context, + pgAdmin *v1beta1.PGAdmin, +) (map[string]*v1beta1.PostgresClusterList, error) { + matching := make(map[string]*v1beta1.PostgresClusterList) + var err error + var selector labels.Selector + + for _, serverGroup := range pgAdmin.Spec.ServerGroups { + cluster := &v1beta1.PostgresCluster{} + if serverGroup.PostgresClusterName != "" { + err = r.Get(ctx, types.NamespacedName{ + Name: serverGroup.PostgresClusterName, + Namespace: pgAdmin.GetNamespace(), + }, cluster) + if err == nil { + matching[serverGroup.Name] = &v1beta1.PostgresClusterList{ + Items: []v1beta1.PostgresCluster{*cluster}, + } + } + continue + } + if selector, err = naming.AsSelector(serverGroup.PostgresClusterSelector); err == nil { + var filteredList v1beta1.PostgresClusterList + err = r.List(ctx, &filteredList, + client.InNamespace(pgAdmin.Namespace), + client.MatchingLabelsSelector{Selector: selector}, + ) + if err == nil { + matching[serverGroup.Name] = &filteredList + } + } + } + + return matching, err +} diff --git a/internal/controller/standalone_pgadmin/service.go b/internal/controller/standalone_pgadmin/service.go new file mode 100644 index 0000000000..2453a6a1fa --- /dev/null +++ b/internal/controller/standalone_pgadmin/service.go @@ -0,0 +1,140 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + "github.com/pkg/errors" + + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// +kubebuilder:rbac:groups="",resources="services",verbs={get} +// +kubebuilder:rbac:groups="",resources="services",verbs={create,delete,patch} + +// reconcilePGAdminService will reconcile a ClusterIP service that points to +// pgAdmin. +func (r *PGAdminReconciler) reconcilePGAdminService( + ctx context.Context, + pgadmin *v1beta1.PGAdmin, +) error { + log := logging.FromContext(ctx) + + // Since spec.Service only accepts a single service name, we shouldn't ever + // have more than one service. However, if the user changes ServiceName, we + // need to delete any existing service(s). At the start of every reconcile + // get all services that match the current pgAdmin labels. 
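// A minimal sketch of the spec field that drives this reconcile (names are
// hypothetical):
//
//   pgadmin.Spec.ServiceName = "example-service"
//
// yields a single ClusterIP Service named "example-service" built by service()
// below, selecting Pods labeled postgres-operator.crunchydata.com/pgadmin=<name>,
// while any other Service carrying this pgAdmin's labels is deleted here first.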
+ services := corev1.ServiceList{} + if err := r.Client.List(ctx, &services, + client.InNamespace(pgadmin.Namespace), + client.MatchingLabels{ + naming.LabelStandalonePGAdmin: pgadmin.Name, + naming.LabelRole: naming.RolePGAdmin, + }); err != nil { + return err + } + + // Delete any controlled and labeled service that is not defined in the spec. + for i := range services.Items { + if services.Items[i].Name != pgadmin.Spec.ServiceName { + log.V(1).Info( + "Deleting service(s) not defined in spec.ServiceName that are owned by pgAdmin", + "serviceName", services.Items[i].Name) + if err := r.deleteControlled(ctx, pgadmin, &services.Items[i]); err != nil { + return err + } + } + } + + // At this point only a service defined by spec.ServiceName should exist. + // Check if the user has requested a service through ServiceName + if pgadmin.Spec.ServiceName != "" { + // Look for an existing service with name ServiceName in the namespace + existingService := &corev1.Service{} + err := r.Client.Get(ctx, types.NamespacedName{ + Name: pgadmin.Spec.ServiceName, + Namespace: pgadmin.GetNamespace(), + }, existingService) + if client.IgnoreNotFound(err) != nil { + return err + } + + // If we found an existing service in our namespace with ServiceName + if !apierrors.IsNotFound(err) { + + // Check if the existing service has ownerReferences. + // If it doesn't we can go ahead and reconcile the service. + // If it does then we need to check if we are the controller. + if len(existingService.OwnerReferences) != 0 { + + // If the service is not controlled by this pgAdmin then we shouldn't reconcile + if !metav1.IsControlledBy(existingService, pgadmin) { + err := errors.New("Service is controlled by another object") + log.V(1).Error(err, "PGO does not force ownership on existing services", + "ServiceName", pgadmin.Spec.ServiceName) + r.Recorder.Event(pgadmin, + corev1.EventTypeWarning, "InvalidServiceWarning", + "Failed to reconcile Service ServiceName: "+pgadmin.Spec.ServiceName) + + return err + } + } + } + + // A service has been requested and we are allowed to create or reconcile + service := service(pgadmin) + + // Set the controller reference on the service + if err := errors.WithStack(r.setControllerReference(pgadmin, service)); err != nil { + return err + } + + return errors.WithStack(r.apply(ctx, service)) + } + + // If we get here then ServiceName was not provided through the spec + return nil +} + +// Generate a corev1.Service for pgAdmin +func service(pgadmin *v1beta1.PGAdmin) *corev1.Service { + + service := &corev1.Service{} + service.ObjectMeta = metav1.ObjectMeta{ + Name: pgadmin.Spec.ServiceName, + Namespace: pgadmin.Namespace, + } + service.SetGroupVersionKind( + corev1.SchemeGroupVersion.WithKind("Service")) + + service.Annotations = pgadmin.Spec.Metadata.GetAnnotationsOrNil() + service.Labels = naming.Merge( + pgadmin.Spec.Metadata.GetLabelsOrNil(), + naming.StandalonePGAdminLabels(pgadmin.Name)) + + service.Spec.Type = corev1.ServiceTypeClusterIP + service.Spec.Selector = map[string]string{ + naming.LabelStandalonePGAdmin: pgadmin.Name, + } + service.Spec.Ports = []corev1.ServicePort{{ + Name: "pgadmin-port", + Port: pgAdminPort, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(pgAdminPort), + }} + + return service +} diff --git a/internal/controller/standalone_pgadmin/service_test.go b/internal/controller/standalone_pgadmin/service_test.go new file mode 100644 index 0000000000..24b20c8247 --- /dev/null +++ b/internal/controller/standalone_pgadmin/service_test.go @@ -0,0 +1,61 @@ 
+// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "testing" + + "gotest.tools/v3/assert" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestService(t *testing.T) { + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Name = "daisy" + pgadmin.Namespace = "daisy-service-ns" + pgadmin.Spec.ServiceName = "daisy-service" + pgadmin.Spec.Metadata = &v1beta1.Metadata{ + Labels: map[string]string{ + "test-label": "test-label-val", + "postgres-operator.crunchydata.com/pgadmin": "bad-val", + "postgres-operator.crunchydata.com/role": "bad-val", + }, + Annotations: map[string]string{ + "test-annotation": "test-annotation-val", + }, + } + + service := service(pgadmin) + assert.Assert(t, service != nil) + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` +apiVersion: v1 +kind: Service + `)) + + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` +annotations: + test-annotation: test-annotation-val +creationTimestamp: null +labels: + postgres-operator.crunchydata.com/pgadmin: daisy + postgres-operator.crunchydata.com/role: pgadmin + test-label: test-label-val +name: daisy-service +namespace: daisy-service-ns + `)) + + assert.Assert(t, cmp.MarshalMatches(service.Spec, ` +ports: +- name: pgadmin-port + port: 5050 + protocol: TCP + targetPort: 5050 +selector: + postgres-operator.crunchydata.com/pgadmin: daisy +type: ClusterIP + `)) +} diff --git a/internal/controller/standalone_pgadmin/statefulset.go b/internal/controller/standalone_pgadmin/statefulset.go new file mode 100644 index 0000000000..e086e333f4 --- /dev/null +++ b/internal/controller/standalone_pgadmin/statefulset.go @@ -0,0 +1,118 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/pkg/errors" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// reconcilePGAdminStatefulSet writes the StatefulSet that runs pgAdmin. +func (r *PGAdminReconciler) reconcilePGAdminStatefulSet( + ctx context.Context, pgadmin *v1beta1.PGAdmin, + configmap *corev1.ConfigMap, dataVolume *corev1.PersistentVolumeClaim, +) error { + sts := statefulset(r, pgadmin, configmap, dataVolume) + + // Previous versions of PGO used a StatefulSet Pod Management Policy that could leave the Pod + // in a failed state. When we see that it has the wrong policy, we will delete the StatefulSet + // and then recreate it with the correct policy, as this is not a property that can be patched. + // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by + // the StatefulSet that gets created in the next reconcile. 
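// In other words (a descriptive note, not new behavior): a StatefulSet created
// by an earlier operator version, presumably with the default OrderedReady pod
// management, is deleted with the Orphan propagation policy and UID/ResourceVersion
// preconditions, so its Pods keep running and the delete is skipped if another
// actor has already replaced the object; the Parallel-policy StatefulSet applied
// on the next reconcile then claims those Pods through the unchanged label selector.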
+ existing := &appsv1.StatefulSet{} + if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { + if !apierrors.IsNotFound(err) { + return err + } + } else { + if existing.Spec.PodManagementPolicy != sts.Spec.PodManagementPolicy { + // We want to delete the STS without affecting the Pods, so we set the PropagationPolicy to Orphan. + // The orphaned Pods will be claimed by the StatefulSet that will be created in the next reconcile. + uid := existing.GetUID() + version := existing.GetResourceVersion() + exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} + propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan) + + return errors.WithStack(client.IgnoreNotFound(r.Client.Delete(ctx, existing, exactly, propagate))) + } + } + + if err := errors.WithStack(r.setControllerReference(pgadmin, sts)); err != nil { + return err + } + return errors.WithStack(r.apply(ctx, sts)) +} + +// statefulset defines the StatefulSet needed to run pgAdmin. +func statefulset( + r *PGAdminReconciler, + pgadmin *v1beta1.PGAdmin, + configmap *corev1.ConfigMap, + dataVolume *corev1.PersistentVolumeClaim, +) *appsv1.StatefulSet { + sts := &appsv1.StatefulSet{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + sts.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("StatefulSet")) + + sts.Annotations = pgadmin.Spec.Metadata.GetAnnotationsOrNil() + sts.Labels = naming.Merge( + pgadmin.Spec.Metadata.GetLabelsOrNil(), + naming.StandalonePGAdminDataLabels(pgadmin.Name), + ) + sts.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: naming.StandalonePGAdminLabels(pgadmin.Name), + } + sts.Spec.Template.Annotations = pgadmin.Spec.Metadata.GetAnnotationsOrNil() + sts.Spec.Template.Labels = naming.Merge( + pgadmin.Spec.Metadata.GetLabelsOrNil(), + naming.StandalonePGAdminDataLabels(pgadmin.Name), + ) + + // Don't clutter the namespace with extra ControllerRevisions. + sts.Spec.RevisionHistoryLimit = initialize.Int32(0) + + // Use StatefulSet's "RollingUpdate" strategy and "Parallel" policy to roll + // out changes to pods even when not Running or not Ready. + // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#rolling-updates + // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#forced-rollback + // - https://kep.k8s.io/3541 + sts.Spec.PodManagementPolicy = appsv1.ParallelPodManagement + sts.Spec.UpdateStrategy.Type = appsv1.RollingUpdateStatefulSetStrategyType + + // Use scheduling constraints from the cluster spec. + sts.Spec.Template.Spec.Affinity = pgadmin.Spec.Affinity + sts.Spec.Template.Spec.Tolerations = pgadmin.Spec.Tolerations + sts.Spec.Template.Spec.PriorityClassName = initialize.FromPointer(pgadmin.Spec.PriorityClassName) + + // Restart containers any time they stop, die, are killed, etc. + // - https://docs.k8s.io/concepts/workloads/pods/pod-lifecycle/#restart-policy + sts.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways + + // pgAdmin does not make any Kubernetes API calls. Use the default + // ServiceAccount and do not mount its credentials. + sts.Spec.Template.Spec.AutomountServiceAccountToken = initialize.Bool(false) + + // Do not add environment variables describing services in this namespace. 
+ sts.Spec.Template.Spec.EnableServiceLinks = initialize.Bool(false) + + // set the image pull secrets, if any exist + sts.Spec.Template.Spec.ImagePullSecrets = pgadmin.Spec.ImagePullSecrets + + sts.Spec.Template.Spec.SecurityContext = podSecurityContext(r) + + pod(pgadmin, configmap, &sts.Spec.Template.Spec, dataVolume) + + return sts +} diff --git a/internal/controller/standalone_pgadmin/statefulset_test.go b/internal/controller/standalone_pgadmin/statefulset_test.go new file mode 100644 index 0000000000..52c501b357 --- /dev/null +++ b/internal/controller/standalone_pgadmin/statefulset_test.go @@ -0,0 +1,207 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestReconcilePGAdminStatefulSet(t *testing.T) { + ctx := context.Background() + cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + reconciler := &PGAdminReconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + + ns := setupNamespace(t, cc) + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Name = "test-standalone-pgadmin" + pgadmin.Namespace = ns.Name + + assert.NilError(t, cc.Create(ctx, pgadmin)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, pgadmin)) }) + + configmap := &corev1.ConfigMap{} + configmap.Name = "test-cm" + + pvc := &corev1.PersistentVolumeClaim{} + pvc.Name = "test-pvc" + + t.Run("verify StatefulSet", func(t *testing.T) { + err := reconciler.reconcilePGAdminStatefulSet(ctx, pgadmin, configmap, pvc) + assert.NilError(t, err) + + selector, err := naming.AsSelector(metav1.LabelSelector{ + MatchLabels: map[string]string{ + naming.LabelStandalonePGAdmin: pgadmin.Name, + }, + }) + assert.NilError(t, err) + + list := appsv1.StatefulSetList{} + assert.NilError(t, cc.List(ctx, &list, client.InNamespace(pgadmin.Namespace), + client.MatchingLabelsSelector{Selector: selector})) + assert.Equal(t, len(list.Items), 1) + + template := list.Items[0].Spec.Template.DeepCopy() + + // Containers and Volumes should be populated. + assert.Assert(t, len(template.Spec.Containers) != 0) + assert.Assert(t, len(template.Spec.Volumes) != 0) + + // Ignore Containers and Volumes in the comparison below. 
+ template.Spec.Containers = nil + template.Spec.InitContainers = nil + template.Spec.Volumes = nil + + assert.Assert(t, cmp.MarshalMatches(template.ObjectMeta, ` +creationTimestamp: null +labels: + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/pgadmin: test-standalone-pgadmin + postgres-operator.crunchydata.com/role: pgadmin + `)) + + compare := ` +automountServiceAccountToken: false +containers: null +dnsPolicy: ClusterFirst +enableServiceLinks: false +restartPolicy: Always +schedulerName: default-scheduler +securityContext: + fsGroup: 2 + fsGroupChangePolicy: OnRootMismatch +terminationGracePeriodSeconds: 30 + ` + + assert.Assert(t, cmp.MarshalMatches(template.Spec, compare)) + }) + + t.Run("verify customized deployment", func(t *testing.T) { + + custompgadmin := new(v1beta1.PGAdmin) + + // add pod level customizations + custompgadmin.Name = "custom-pgadmin" + custompgadmin.Namespace = ns.Name + + // annotation and label + custompgadmin.Spec.Metadata = &v1beta1.Metadata{ + Annotations: map[string]string{ + "annotation1": "annotationvalue", + }, + Labels: map[string]string{ + "label1": "labelvalue", + }, + } + + // scheduling constraints + custompgadmin.Spec.Affinity = &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{{ + MatchExpressions: []corev1.NodeSelectorRequirement{{ + Key: "key", + Operator: "Exists", + }}, + }}, + }, + }, + } + custompgadmin.Spec.Tolerations = []corev1.Toleration{ + {Key: "sometoleration"}, + } + + if pgadmin.Spec.PriorityClassName != nil { + custompgadmin.Spec.PriorityClassName = initialize.String("testpriorityclass") + } + + // set an image pull secret + custompgadmin.Spec.ImagePullSecrets = []corev1.LocalObjectReference{{ + Name: "myImagePullSecret"}} + + assert.NilError(t, cc.Create(ctx, custompgadmin)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, custompgadmin)) }) + + err := reconciler.reconcilePGAdminStatefulSet(ctx, custompgadmin, configmap, pvc) + assert.NilError(t, err) + + selector, err := naming.AsSelector(metav1.LabelSelector{ + MatchLabels: map[string]string{ + naming.LabelStandalonePGAdmin: custompgadmin.Name, + }, + }) + assert.NilError(t, err) + + list := appsv1.StatefulSetList{} + assert.NilError(t, cc.List(ctx, &list, client.InNamespace(custompgadmin.Namespace), + client.MatchingLabelsSelector{Selector: selector})) + assert.Equal(t, len(list.Items), 1) + + template := list.Items[0].Spec.Template.DeepCopy() + + // Containers and Volumes should be populated. + assert.Assert(t, len(template.Spec.Containers) != 0) + + // Ignore Containers and Volumes in the comparison below. 
+ template.Spec.Containers = nil + template.Spec.InitContainers = nil + template.Spec.Volumes = nil + + assert.Assert(t, cmp.MarshalMatches(template.ObjectMeta, ` +annotations: + annotation1: annotationvalue +creationTimestamp: null +labels: + label1: labelvalue + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/pgadmin: custom-pgadmin + postgres-operator.crunchydata.com/role: pgadmin + `)) + + compare := ` +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: key + operator: Exists +automountServiceAccountToken: false +containers: null +dnsPolicy: ClusterFirst +enableServiceLinks: false +imagePullSecrets: +- name: myImagePullSecret +restartPolicy: Always +schedulerName: default-scheduler +securityContext: + fsGroup: 2 + fsGroupChangePolicy: OnRootMismatch +terminationGracePeriodSeconds: 30 +tolerations: +- key: sometoleration +` + + assert.Assert(t, cmp.MarshalMatches(template.Spec, compare)) + }) +} diff --git a/internal/controller/standalone_pgadmin/users.go b/internal/controller/standalone_pgadmin/users.go new file mode 100644 index 0000000000..3c9a3ce05b --- /dev/null +++ b/internal/controller/standalone_pgadmin/users.go @@ -0,0 +1,308 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +type Executor func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, +) error + +// pgAdminUserForJson is used for user data that is put in the users.json file in the +// pgAdmin secret. IsAdmin and Username come from the user spec, whereas Password is +// generated when the user is created. +type pgAdminUserForJson struct { + // Whether the user has admin privileges or not. + IsAdmin bool `json:"isAdmin"` + + // The user's password + Password string `json:"password"` + + // The username for User in pgAdmin. + // Must be unique in the pgAdmin's users list. + Username string `json:"username"` +} + +// reconcilePGAdminUsers reconciles the users listed in the pgAdmin spec, adding them +// to the pgAdmin secret, and creating/updating them in pgAdmin when appropriate. +func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin *v1beta1.PGAdmin) error { + const container = naming.ContainerPGAdmin + var podExecutor Executor + log := logging.FromContext(ctx) + + // Find the running pgAdmin container. When there is none, return early. 
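// For example (matching the fixtures in users_test.go below): a PGAdmin with
// UID "123" is served by the StatefulSet Pod "pgadmin-123-0", so that is the
// Pod fetched and exec'd into here.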
+ pod := &corev1.Pod{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + pod.Name += "-0" + + err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(pod), pod)) + if err != nil { + return client.IgnoreNotFound(err) + } + + var running bool + var pgAdminImageSha string + for _, status := range pod.Status.ContainerStatuses { + if status.Name == container { + running = status.State.Running != nil + pgAdminImageSha = status.ImageID + } + } + if terminating := pod.DeletionTimestamp != nil; running && !terminating { + ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) + + podExecutor = func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + } + } + if podExecutor == nil { + return nil + } + + // If the pgAdmin version is not in the status or the image SHA has changed, get + // the pgAdmin version and store it in the status. + var pgadminVersion int + if pgadmin.Status.MajorVersion == 0 || pgadmin.Status.ImageSHA != pgAdminImageSha { + pgadminVersion, err = r.reconcilePGAdminMajorVersion(ctx, podExecutor) + if err != nil { + return err + } + pgadmin.Status.MajorVersion = pgadminVersion + pgadmin.Status.ImageSHA = pgAdminImageSha + } else { + pgadminVersion = pgadmin.Status.MajorVersion + } + + // If the pgAdmin version is not v8 or higher, return early as user management is + // only supported for pgAdmin v8 and higher. + if pgadminVersion < 8 { + // If pgAdmin version is less than v8 and user management is being attempted, + // log a message clarifying that it is only supported for pgAdmin v8 and higher. + if len(pgadmin.Spec.Users) > 0 { + log.Info("User management is only supported for pgAdmin v8 and higher.", + "pgadminVersion", pgadminVersion) + } + return err + } + + return r.writePGAdminUsers(ctx, pgadmin, podExecutor) +} + +// reconcilePGAdminMajorVersion execs into the pgAdmin pod and retrieves the pgAdmin major version +func (r *PGAdminReconciler) reconcilePGAdminMajorVersion(ctx context.Context, exec Executor) (int, error) { + script := fmt.Sprintf(` +PGADMIN_DIR=%s +cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_RELEASE)" +`, pgAdminDir) + + var stdin, stdout, stderr bytes.Buffer + + err := exec(ctx, &stdin, &stdout, &stderr, + []string{"bash", "-ceu", "--", script}...) + + if err != nil { + return 0, err + } + + return strconv.Atoi(strings.TrimSpace(stdout.String())) +} + +// writePGAdminUsers takes the users in the pgAdmin spec and writes (adds or updates) their data +// to both pgAdmin and the users.json file that is stored in the pgAdmin secret. If a user is +// removed from the spec, its data is removed from users.json, but it is not deleted from pgAdmin. 
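// For a spec user {Username: "admin@example.com", Role: "Administrator"} (a
// hypothetical example), this function ends up running, inside the pgAdmin
// container, either
//
//   python3 setup.py add-user --admin -- "admin@example.com" "<password>"
//
// for a brand new user, or
//
//   python3 setup.py update-user --admin --password "<password>" "admin@example.com"
//
// when the stored users.json entry shows the password or admin role changed.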
+func (r *PGAdminReconciler) writePGAdminUsers(ctx context.Context, pgadmin *v1beta1.PGAdmin, + exec Executor) error { + log := logging.FromContext(ctx) + + existingUserSecret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + err := errors.WithStack( + r.Client.Get(ctx, client.ObjectKeyFromObject(existingUserSecret), existingUserSecret)) + if client.IgnoreNotFound(err) != nil { + return err + } + + intentUserSecret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + intentUserSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) + + intentUserSecret.Annotations = naming.Merge( + pgadmin.Spec.Metadata.GetAnnotationsOrNil(), + ) + intentUserSecret.Labels = naming.Merge( + pgadmin.Spec.Metadata.GetLabelsOrNil(), + naming.StandalonePGAdminLabels(pgadmin.Name)) + + // Initialize secret data map, or copy existing data if not nil + intentUserSecret.Data = make(map[string][]byte) + + setupScript := fmt.Sprintf(` +PGADMIN_DIR=%s +cd $PGADMIN_DIR +`, pgAdminDir) + + var existingUsersArr []pgAdminUserForJson + if existingUserSecret.Data["users.json"] != nil { + err := json.Unmarshal(existingUserSecret.Data["users.json"], &existingUsersArr) + if err != nil { + return err + } + } + existingUsersMap := make(map[string]pgAdminUserForJson) + for _, user := range existingUsersArr { + existingUsersMap[user.Username] = user + } + intentUsers := []pgAdminUserForJson{} + for _, user := range pgadmin.Spec.Users { + var stdin, stdout, stderr bytes.Buffer + typeFlag := "--nonadmin" + isAdmin := false + if user.Role == "Administrator" { + typeFlag = "--admin" + isAdmin = true + } + + // Get password from secret + userPasswordSecret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Namespace: pgadmin.Namespace, + Name: user.PasswordRef.LocalObjectReference.Name, + }} + err := errors.WithStack( + r.Client.Get(ctx, client.ObjectKeyFromObject(userPasswordSecret), userPasswordSecret)) + if err != nil { + log.Error(err, "Could not get user password secret") + continue + } + + // Make sure the password isn't nil or empty + password := userPasswordSecret.Data[user.PasswordRef.Key] + if password == nil { + log.Error(nil, `Could not retrieve password from secret. Make sure secret name and key are correct.`) + continue + } + if len(password) == 0 { + log.Error(nil, `Password must not be empty.`) + continue + } + + // Assemble user that will be used in add/update command and in updating + // the users.json file in the secret + intentUser := pgAdminUserForJson{ + Username: user.Username, + Password: string(password), + IsAdmin: isAdmin, + } + // If the user already exists in users.json and isAdmin or password has + // changed, run the update-user command. If the user already exists in + // users.json, but it hasn't changed, do nothing. If the user doesn't + // exist in users.json, run the add-user command. + if existingUser, present := existingUsersMap[user.Username]; present { + // If Password or IsAdmin have changed, attempt update-user command + if intentUser.IsAdmin != existingUser.IsAdmin || intentUser.Password != existingUser.Password { + script := setupScript + fmt.Sprintf(`python3 setup.py update-user %s --password "%s" "%s"`, + typeFlag, intentUser.Password, intentUser.Username) + "\n" + err = exec(ctx, &stdin, &stdout, &stderr, + []string{"bash", "-ceu", "--", script}...) + + // If any errors occurred during update, we want to log a message, + // add the existing user to users.json since the update was + // unsuccessful, and continue reconciling users. 
+ if err != nil { + log.Error(err, "PodExec failed: ") + intentUsers = append(intentUsers, existingUser) + continue + } else if strings.TrimSpace(stderr.String()) != "" { + log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py error for %s: ", + intentUser.Username)) + intentUsers = append(intentUsers, existingUser) + continue + } + // If update user fails due to user not found or password length: + // https://github.com/pgadmin-org/pgadmin4/blob/REL-8_5/web/setup.py#L263 + // https://github.com/pgadmin-org/pgadmin4/blob/REL-8_5/web/setup.py#L246 + if strings.Contains(stdout.String(), "User not found") || + strings.Contains(stdout.String(), "Password must be") { + + log.Info("Failed to update pgAdmin user", "user", intentUser.Username, "error", stdout.String()) + r.Recorder.Event(pgadmin, + corev1.EventTypeWarning, "InvalidUserWarning", + fmt.Sprintf("Failed to update pgAdmin user %s: %s", + intentUser.Username, stdout.String())) + intentUsers = append(intentUsers, existingUser) + continue + } + } + } else { + // New user, so attempt add-user command + script := setupScript + fmt.Sprintf(`python3 setup.py add-user %s -- "%s" "%s"`, + typeFlag, intentUser.Username, intentUser.Password) + "\n" + err = exec(ctx, &stdin, &stdout, &stderr, + []string{"bash", "-ceu", "--", script}...) + + // If any errors occurred when attempting to add user, we want to log a message, + // and continue reconciling users. + if err != nil { + log.Error(err, "PodExec failed: ") + continue + } + if strings.TrimSpace(stderr.String()) != "" { + log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py error for %s: ", + intentUser.Username)) + continue + } + // If add user fails due to invalid username or password length: + // https://github.com/pgadmin-org/pgadmin4/blob/REL-8_5/web/pgadmin/tools/user_management/__init__.py#L457 + // https://github.com/pgadmin-org/pgadmin4/blob/REL-8_5/web/setup.py#L374 + if strings.Contains(stdout.String(), "Invalid email address") || + strings.Contains(stdout.String(), "Password must be") { + + log.Info(fmt.Sprintf("Failed to create pgAdmin user %s: %s", + intentUser.Username, stdout.String())) + r.Recorder.Event(pgadmin, + corev1.EventTypeWarning, "InvalidUserWarning", + fmt.Sprintf("Failed to create pgAdmin user %s: %s", + intentUser.Username, stdout.String())) + continue + } + } + // If we've gotten here, the user was successfully added or updated or nothing was done + // to the user at all, so we want to add it to the slice of users that will be put in the + // users.json file in the secret. + intentUsers = append(intentUsers, intentUser) + } + + // We've at least attempted to reconcile all users in the spec. If errors occurred when attempting + // to add a user, that user will not be in intentUsers. If errors occurred when attempting to + // update a user, the user will be in intentUsers as it existed before. We now want to marshal the + // intentUsers to json and write the users.json file to the secret. 
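// For illustration (hypothetical values), the marshalled users.json stored in
// the Secret looks like:
//
//   [{"isAdmin":true,"password":"<from the user's PasswordRef Secret>","username":"admin@example.com"},
//    {"isAdmin":false,"password":"<from the user's PasswordRef Secret>","username":"viewer@example.com"}]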
+ usersJSON, err := json.Marshal(intentUsers) + if err != nil { + return err + } + intentUserSecret.Data["users.json"] = usersJSON + + err = errors.WithStack(r.setControllerReference(pgadmin, intentUserSecret)) + if err == nil { + err = errors.WithStack(r.apply(ctx, intentUserSecret)) + } + + return err +} diff --git a/internal/controller/standalone_pgadmin/users_test.go b/internal/controller/standalone_pgadmin/users_test.go new file mode 100644 index 0000000000..409fcea701 --- /dev/null +++ b/internal/controller/standalone_pgadmin/users_test.go @@ -0,0 +1,709 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strings" + "testing" + + "github.com/pkg/errors" + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/events" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestReconcilePGAdminUsers(t *testing.T) { + ctx := context.Background() + + pgadmin := &v1beta1.PGAdmin{} + pgadmin.Namespace = "ns1" + pgadmin.Name = "pgadmin1" + pgadmin.UID = "123" + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + Username: "testuser", + Role: "Administrator", + }, + } + + t.Run("NoPods", func(t *testing.T) { + r := new(PGAdminReconciler) + r.Client = fake.NewClientBuilder().Build() + assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) + }) + + // Pod in the namespace + pod := corev1.Pod{} + pod.Namespace = pgadmin.Namespace + pod.Name = fmt.Sprintf("pgadmin-%s-0", pgadmin.UID) + + t.Run("ContainerNotRunning", func(t *testing.T) { + pod := pod.DeepCopy() + + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = nil + + r := new(PGAdminReconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) + }) + + t.Run("PodTerminating", func(t *testing.T) { + pod := pod.DeepCopy() + + // Must add finalizer when adding deletion timestamp otherwise fake client will panic: + // https://github.com/kubernetes-sigs/controller-runtime/pull/2316 + pod.Finalizers = append(pod.Finalizers, "some-finalizer") + + pod.DeletionTimestamp = new(metav1.Time) + *pod.DeletionTimestamp = metav1.Now() + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + + r := new(PGAdminReconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) + }) + + // We only test v7 because if we did v8 then the writePGAdminUsers would + // be called and that method has its own tests later in this file + t.Run("PodHealthyVersionNotSet", func(t *testing.T) { + pgadmin := pgadmin.DeepCopy() + pod := pod.DeepCopy() + + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + pod.Status.ContainerStatuses[0].ImageID = "fakeSHA" + 
+ r := new(PGAdminReconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + calls := 0 + r.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + assert.Equal(t, pod, "pgadmin-123-0") + assert.Equal(t, namespace, pgadmin.Namespace) + assert.Equal(t, container, naming.ContainerPGAdmin) + + // Simulate a v7 version of pgAdmin by setting stdout to "7" for + // podexec call in reconcilePGAdminMajorVersion + stdout.Write([]byte("7")) + return nil + } + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) + assert.Equal(t, calls, 1, "PodExec should be called once") + assert.Equal(t, pgadmin.Status.MajorVersion, 7) + assert.Equal(t, pgadmin.Status.ImageSHA, "fakeSHA") + }) + + t.Run("PodHealthyShaChanged", func(t *testing.T) { + pgadmin := pgadmin.DeepCopy() + pgadmin.Status.MajorVersion = 7 + pgadmin.Status.ImageSHA = "fakeSHA" + pod := pod.DeepCopy() + + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + pod.Status.ContainerStatuses[0].ImageID = "newFakeSHA" + + r := new(PGAdminReconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + calls := 0 + r.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + // Simulate a v7 version of pgAdmin by setting stdout to "7" for + // podexec call in reconcilePGAdminMajorVersion + stdout.Write([]byte("7")) + return nil + } + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) + assert.Equal(t, calls, 1, "PodExec should be called once") + assert.Equal(t, pgadmin.Status.MajorVersion, 7) + assert.Equal(t, pgadmin.Status.ImageSHA, "newFakeSHA") + }) +} + +func TestReconcilePGAdminMajorVersion(t *testing.T) { + ctx := context.Background() + pod := corev1.Pod{} + pod.Namespace = "test-namespace" + pod.Name = "pgadmin-123-0" + reconciler := &PGAdminReconciler{} + + podExecutor := func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return reconciler.PodExec(ctx, pod.Namespace, pod.Name, "pgadmin", stdin, stdout, stderr, command...) 
+ } + + t.Run("SuccessfulRetrieval", func(t *testing.T) { + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + assert.Equal(t, pod, "pgadmin-123-0") + assert.Equal(t, namespace, "test-namespace") + assert.Equal(t, container, naming.ContainerPGAdmin) + + // Simulate a v7 version of pgAdmin by setting stdout to "7" for + // podexec call in reconcilePGAdminMajorVersion + stdout.Write([]byte("7")) + return nil + } + + version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) + assert.NilError(t, err) + assert.Equal(t, version, 7) + }) + + t.Run("FailedRetrieval", func(t *testing.T) { + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + // Simulate the python call giving bad data (not a version int) + stdout.Write([]byte("asdfjkl;")) + return nil + } + + version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) + assert.Check(t, err != nil) + assert.Equal(t, version, 0) + }) + + t.Run("PodExecError", func(t *testing.T) { + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return errors.New("PodExecError") + } + + version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) + assert.Check(t, err != nil) + assert.Equal(t, version, 0) + }) +} + +func TestWritePGAdminUsers(t *testing.T) { + ctx := context.Background() + cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &PGAdminReconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + Recorder: recorder, + } + + ns := setupNamespace(t, cc) + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Name = "test-standalone-pgadmin" + pgadmin.Namespace = ns.Name + assert.NilError(t, cc.Create(ctx, pgadmin)) + + userPasswordSecret1 := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "user-password-secret1", + Namespace: ns.Name, + }, + Data: map[string][]byte{ + "password": []byte(`asdf`), + }, + } + assert.NilError(t, cc.Create(ctx, userPasswordSecret1)) + + userPasswordSecret2 := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "user-password-secret2", + Namespace: ns.Name, + }, + Data: map[string][]byte{ + "password": []byte(`qwer`), + }, + } + assert.NilError(t, cc.Create(ctx, userPasswordSecret2)) + + t.Cleanup(func() { + assert.Check(t, cc.Delete(ctx, pgadmin)) + assert.Check(t, cc.Delete(ctx, userPasswordSecret1)) + assert.Check(t, cc.Delete(ctx, userPasswordSecret2)) + }) + + pod := corev1.Pod{} + pod.Namespace = pgadmin.Namespace + pod.Name = fmt.Sprintf("pgadmin-%s-0", pgadmin.UID) + + podExecutor := func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return reconciler.PodExec(ctx, pod.Namespace, pod.Name, "pgadmin", stdin, stdout, stderr, command...) 
+ } + + t.Run("CreateOneUser", func(t *testing.T) { + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser1", + Role: "Administrator", + }, + } + + calls := 0 + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + assert.Equal(t, pod, fmt.Sprintf("pgadmin-%s-0", pgadmin.UID)) + assert.Equal(t, namespace, pgadmin.Namespace) + assert.Equal(t, container, naming.ContainerPGAdmin) + assert.Equal(t, strings.Contains(strings.Join(command, " "), + `python3 setup.py add-user --admin -- "testuser1" "asdf"`), true) + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 1, "PodExec should be called once") + + secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, true) + assert.Equal(t, usersArr[0].Password, "asdf") + } + }) + + t.Run("AddAnotherUserEditExistingUser", func(t *testing.T) { + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser1", + Role: "User", + }, + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret2", + }, + Key: "password", + }, + Username: "testuser2", + Role: "Administrator", + }, + } + + calls := 0 + addUserCalls := 0 + updateUserCalls := 0 + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + if strings.Contains(strings.Join(command, " "), "python3 setup.py add-user") { + addUserCalls++ + } + if strings.Contains(strings.Join(command, " "), "python3 setup.py update-user") { + updateUserCalls++ + } + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 2, "PodExec should be called twice") + assert.Equal(t, addUserCalls, 1, "The add-user command should be executed once") + assert.Equal(t, updateUserCalls, 1, "The update-user command should be executed once") + + secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 2) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + assert.Equal(t, usersArr[1].Username, "testuser2") + assert.Equal(t, usersArr[1].IsAdmin, true) + assert.Equal(t, usersArr[1].Password, "qwer") + } + }) + + 
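	// Recap of the decision logic in writePGAdminUsers (users.go above), which the
	// next subtest exercises in a single pass:
	//
	//	user not yet in users.json              -> python3 setup.py add-user [--admin|--nonadmin] -- "<username>" "<password>"
	//	user present, password or role changed  -> python3 setup.py update-user [--admin|--nonadmin] --password "<password>" "<username>"
	//	user present and unchanged              -> no PodExec call; the existing entry is carried over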
t.Run("AddOneEditOneLeaveOneAlone", func(t *testing.T) { + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser1", + Role: "User", + }, + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser2", + Role: "User", + }, + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret2", + }, + Key: "password", + }, + Username: "testuser3", + Role: "Administrator", + }, + } + calls := 0 + addUserCalls := 0 + updateUserCalls := 0 + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + if strings.Contains(strings.Join(command, " "), "python3 setup.py add-user") { + addUserCalls++ + } + if strings.Contains(strings.Join(command, " "), "python3 setup.py update-user") { + updateUserCalls++ + } + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 2, "PodExec should be called twice") + assert.Equal(t, addUserCalls, 1, "The add-user command should be executed once") + assert.Equal(t, updateUserCalls, 1, "The update-user command should be executed once") + + secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 3) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + assert.Equal(t, usersArr[1].Username, "testuser2") + assert.Equal(t, usersArr[1].IsAdmin, false) + assert.Equal(t, usersArr[1].Password, "asdf") + assert.Equal(t, usersArr[2].Username, "testuser3") + assert.Equal(t, usersArr[2].IsAdmin, true) + assert.Equal(t, usersArr[2].Password, "qwer") + } + }) + + t.Run("DeleteUsers", func(t *testing.T) { + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser1", + Role: "User", + }, + } + calls := 0 + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 0, "PodExec should be called zero times") + + secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + }) 
+ + t.Run("ErrorsWhenUpdating", func(t *testing.T) { + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser1", + Role: "Administrator", + }, + } + + // PodExec error + calls := 0 + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + return errors.New("podexec failure") + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 1, "PodExec should be called once") + + // User in users.json should be unchanged + secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + + // setup.py error in stderr + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + stderr.Write([]byte("issue running setup.py update-user command")) + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 2, "PodExec should be called once more") + + // User in users.json should be unchanged + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + }) + + t.Run("ErrorsWhenAdding", func(t *testing.T) { + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser1", + Role: "User", + }, + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret2", + }, + Key: "password", + }, + Username: "testuser2", + Role: "Administrator", + }, + } + + // PodExec error + calls := 0 + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + return errors.New("podexec failure") + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 1, "PodExec should be called once") + + // User in users.json should be unchanged and attempt to add user should not + // have succeeded + secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr 
[]pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + + // setup.py error in stderr + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + stderr.Write([]byte("issue running setup.py add-user command")) + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 2, "PodExec should be called once more") + + // User in users.json should be unchanged and attempt to add user should not + // have succeeded + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + + // setup.py error in stdout regarding email address + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + stdout.Write([]byte("Invalid email address")) + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 3, "PodExec should be called once more") + + // User in users.json should be unchanged and attempt to add user should not + // have succeeded + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + assert.Equal(t, len(recorder.Events), 1) + + // setup.py error in stdout regarding password + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + stdout.Write([]byte("Password must be at least 6 characters long")) + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 4, "PodExec should be called once more") + + // User in users.json should be unchanged and attempt to add user should not + // have succeeded + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + assert.Equal(t, len(recorder.Events), 2) + }) +} diff --git a/internal/controller/standalone_pgadmin/volume.go b/internal/controller/standalone_pgadmin/volume.go new file mode 
100644 index 0000000000..7615f6142b --- /dev/null +++ b/internal/controller/standalone_pgadmin/volume.go @@ -0,0 +1,136 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/pkg/errors" + + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,patch} + +// reconcilePGAdminDataVolume writes the PersistentVolumeClaim for instance's +// pgAdmin data volume. +func (r *PGAdminReconciler) reconcilePGAdminDataVolume( + ctx context.Context, pgadmin *v1beta1.PGAdmin, +) (*corev1.PersistentVolumeClaim, error) { + + pvc := pvc(pgadmin) + + err := errors.WithStack(r.setControllerReference(pgadmin, pvc)) + + if err == nil { + err = r.handlePersistentVolumeClaimError(pgadmin, + errors.WithStack(r.apply(ctx, pvc))) + } + + return pvc, err +} + +// pvc defines the data volume for pgAdmin. +func pvc(pgadmin *v1beta1.PGAdmin) *corev1.PersistentVolumeClaim { + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: naming.StandalonePGAdmin(pgadmin), + } + pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim")) + + pvc.Annotations = pgadmin.Spec.Metadata.GetAnnotationsOrNil() + pvc.Labels = naming.Merge( + pgadmin.Spec.Metadata.GetLabelsOrNil(), + naming.StandalonePGAdminDataLabels(pgadmin.Name), + ) + pvc.Spec = pgadmin.Spec.DataVolumeClaimSpec + + return pvc +} + +// handlePersistentVolumeClaimError inspects err for expected Kubernetes API +// responses to writing a PVC. It turns errors it understands into conditions +// and events. When err is handled it returns nil. Otherwise it returns err. +// +// TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object. +func (r *PGAdminReconciler) handlePersistentVolumeClaimError( + pgadmin *v1beta1.PGAdmin, err error, +) error { + var status metav1.Status + if api := apierrors.APIStatus(nil); errors.As(err, &api) { + status = api.Status() + } + + cannotResize := func(err error) { + meta.SetStatusCondition(&pgadmin.Status.Conditions, metav1.Condition{ + Type: v1beta1.PersistentVolumeResizing, + Status: metav1.ConditionFalse, + Reason: string(apierrors.ReasonForError(err)), + Message: "One or more volumes cannot be resized", + + ObservedGeneration: pgadmin.Generation, + }) + } + + volumeError := func(err error) { + r.Recorder.Event(pgadmin, + corev1.EventTypeWarning, "PersistentVolumeError", err.Error()) + } + + // Forbidden means (RBAC is broken or) the API request was rejected by an + // admission controller. Assume it is the latter and raise the issue as a + // condition and event. + // - https://releases.k8s.io/v1.21.0/plugin/pkg/admission/storage/persistentvolume/resize/admission.go + if apierrors.IsForbidden(err) { + cannotResize(err) + volumeError(err) + return nil + } + + if apierrors.IsInvalid(err) && status.Details != nil { + unknownCause := false + for _, cause := range status.Details.Causes { + switch { + // Forbidden "spec" happens when the PVC is waiting to be bound. + // It should resolve on its own and trigger another reconcile. Raise + // the issue as an event. 
+ // - https://releases.k8s.io/v1.21.0/pkg/apis/core/validation/validation.go#L2028 + // + // TODO(cbandy): This can also happen when changing a field other + // than requests within the spec (access modes, storage class, etc). + // That case needs a condition or should be prevented via a webhook. + case + cause.Type == metav1.CauseType(field.ErrorTypeForbidden) && + cause.Field == "spec": + volumeError(err) + + // Forbidden "storage" happens when the change is not allowed. Raise + // the issue as a condition and event. + // - https://releases.k8s.io/v1.21.0/pkg/apis/core/validation/validation.go#L2028 + case + cause.Type == metav1.CauseType(field.ErrorTypeForbidden) && + cause.Field == "spec.resources.requests.storage": + cannotResize(err) + volumeError(err) + + default: + unknownCause = true + } + } + + if len(status.Details.Causes) > 0 && !unknownCause { + // All the causes were identified and handled. + return nil + } + } + + return err +} diff --git a/internal/controller/standalone_pgadmin/volume_test.go b/internal/controller/standalone_pgadmin/volume_test.go new file mode 100644 index 0000000000..645c228277 --- /dev/null +++ b/internal/controller/standalone_pgadmin/volume_test.go @@ -0,0 +1,291 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/pkg/errors" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/events" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestReconcilePGAdminDataVolume(t *testing.T) { + ctx := context.Background() + cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + reconciler := &PGAdminReconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + + ns := setupNamespace(t, cc) + pgadmin := &v1beta1.PGAdmin{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-standalone-pgadmin", + Namespace: ns.Name, + }, + Spec: v1beta1.PGAdminSpec{ + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi")}}, + StorageClassName: initialize.String("storage-class-for-data"), + }}} + + assert.NilError(t, cc.Create(ctx, pgadmin)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, pgadmin)) }) + + t.Run("DataVolume", func(t *testing.T) { + pvc, err := reconciler.reconcilePGAdminDataVolume(ctx, pgadmin) + assert.NilError(t, err) + + assert.Assert(t, metav1.IsControlledBy(pvc, pgadmin)) + + assert.Equal(t, pvc.Labels[naming.LabelStandalonePGAdmin], pgadmin.Name) + assert.Equal(t, pvc.Labels[naming.LabelRole], naming.RolePGAdmin) + assert.Equal(t, pvc.Labels[naming.LabelData], naming.DataPGAdmin) + + assert.Assert(t, 
cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + requests: + storage: 1Gi +storageClassName: storage-class-for-data +volumeMode: Filesystem + `)) + }) +} + +func TestHandlePersistentVolumeClaimError(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &PGAdminReconciler{ + Recorder: recorder, + } + + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Namespace = "ns1" + pgadmin.Name = "pg2" + + reset := func() { + pgadmin.Status.Conditions = pgadmin.Status.Conditions[:0] + recorder.Events = recorder.Events[:0] + } + + // It returns any error it does not recognize completely. + t.Run("Unexpected", func(t *testing.T) { + t.Cleanup(reset) + + err := errors.New("whomp") + + assert.Equal(t, err, reconciler.handlePersistentVolumeClaimError(pgadmin, err)) + assert.Assert(t, len(pgadmin.Status.Conditions) == 0) + assert.Assert(t, len(recorder.Events) == 0) + + err = apierrors.NewInvalid( + corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim").GroupKind(), + "some-pvc", + field.ErrorList{ + field.Forbidden(field.NewPath("metadata"), "dunno"), + }) + + assert.Equal(t, err, reconciler.handlePersistentVolumeClaimError(pgadmin, err)) + assert.Assert(t, len(pgadmin.Status.Conditions) == 0) + assert.Assert(t, len(recorder.Events) == 0) + }) + + // Neither statically nor dynamically provisioned claims can be resized + // before they are bound to a persistent volume. Kubernetes rejects such + // changes during PVC validation. + // + // A static PVC is one with a present-and-blank storage class. It is + // pending until a PV exists that matches its selector, requests, etc. + // - https://docs.k8s.io/concepts/storage/persistent-volumes/#static + // - https://docs.k8s.io/concepts/storage/persistent-volumes/#class-1 + // + // A dynamic PVC is associated with a storage class. Storage classes that + // "WaitForFirstConsumer" do not bind a PV until there is a pod. + // - https://docs.k8s.io/concepts/storage/persistent-volumes/#dynamic + t.Run("Pending", func(t *testing.T) { + t.Run("Grow", func(t *testing.T) { + t.Cleanup(reset) + + err := apierrors.NewInvalid( + corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim").GroupKind(), + "my-pending-pvc", + field.ErrorList{ + // - https://releases.k8s.io/v1.24.0/pkg/apis/core/validation/validation.go#L2184 + field.Forbidden(field.NewPath("spec"), "… immutable … bound claim …"), + }) + + // PVCs will bind eventually. This error should become an event without a condition. + assert.NilError(t, reconciler.handlePersistentVolumeClaimError(pgadmin, err)) + + assert.Check(t, len(pgadmin.Status.Conditions) == 0) + assert.Check(t, len(recorder.Events) > 0) + + for _, event := range recorder.Events { + assert.Equal(t, event.Type, "Warning") + assert.Equal(t, event.Reason, "PersistentVolumeError") + assert.Assert(t, cmp.Contains(event.Note, "PersistentVolumeClaim")) + assert.Assert(t, cmp.Contains(event.Note, "my-pending-pvc")) + assert.Assert(t, cmp.Contains(event.Note, "bound claim")) + assert.DeepEqual(t, event.Regarding, corev1.ObjectReference{ + APIVersion: v1beta1.GroupVersion.Identifier(), + Kind: "PGAdmin", + Namespace: "ns1", Name: "pg2", + }) + } + }) + + t.Run("Shrink", func(t *testing.T) { + t.Cleanup(reset) + + // Requests to make a pending PVC smaller fail for multiple reasons. 
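			// For reference, handlePersistentVolumeClaimError (volume.go above) maps the
			// causes of this kind of error as follows:
			//
			//	Forbidden on "spec"                            -> warning event only (the claim should bind eventually)
			//	Forbidden on "spec.resources.requests.storage" -> PersistentVolumeResizing condition plus warning event
			//	apierrors.IsForbidden (admission rejection)    -> condition plus event
			//	any unrecognized cause                         -> the error is returned to the caller
			//
			// The error built below carries the first two causes at once.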
+ err := apierrors.NewInvalid( + corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim").GroupKind(), + "my-pending-pvc", + field.ErrorList{ + // - https://releases.k8s.io/v1.24.0/pkg/apis/core/validation/validation.go#L2184 + field.Forbidden(field.NewPath("spec"), "… immutable … bound claim …"), + + // - https://releases.k8s.io/v1.24.0/pkg/apis/core/validation/validation.go#L2188 + field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "… not be less …"), + }) + + // PVCs will bind eventually, but the size is rejected. + assert.NilError(t, reconciler.handlePersistentVolumeClaimError(pgadmin, err)) + + assert.Check(t, len(pgadmin.Status.Conditions) > 0) + assert.Check(t, len(recorder.Events) > 0) + + for _, condition := range pgadmin.Status.Conditions { + assert.Equal(t, condition.Type, "PersistentVolumeResizing") + assert.Equal(t, condition.Status, metav1.ConditionFalse) + assert.Equal(t, condition.Reason, "Invalid") + assert.Assert(t, cmp.Contains(condition.Message, "cannot be resized")) + } + + for _, event := range recorder.Events { + assert.Equal(t, event.Type, "Warning") + assert.Equal(t, event.Reason, "PersistentVolumeError") + assert.Assert(t, cmp.Contains(event.Note, "PersistentVolumeClaim")) + assert.Assert(t, cmp.Contains(event.Note, "my-pending-pvc")) + assert.Assert(t, cmp.Contains(event.Note, "bound claim")) + assert.Assert(t, cmp.Contains(event.Note, "not be less")) + assert.DeepEqual(t, event.Regarding, corev1.ObjectReference{ + APIVersion: v1beta1.GroupVersion.Identifier(), + Kind: "PGAdmin", + Namespace: "ns1", Name: "pg2", + }) + } + }) + }) + + // Statically provisioned claims cannot be resized. Kubernetes responds + // differently based on the size growing or shrinking. + // + // Dynamically provisioned claims of storage classes that do *not* + // "allowVolumeExpansion" behave the same way. + t.Run("NoExpansion", func(t *testing.T) { + t.Run("Grow", func(t *testing.T) { + t.Cleanup(reset) + + // - https://releases.k8s.io/v1.24.0/plugin/pkg/admission/storage/persistentvolume/resize/admission.go#L108 + err := apierrors.NewForbidden( + corev1.Resource("persistentvolumeclaims"), "my-static-pvc", + errors.New("… only dynamically provisioned …")) + + // This PVC cannot resize. The error should become an event and condition. + assert.NilError(t, reconciler.handlePersistentVolumeClaimError(pgadmin, err)) + + assert.Check(t, len(pgadmin.Status.Conditions) > 0) + assert.Check(t, len(recorder.Events) > 0) + + for _, condition := range pgadmin.Status.Conditions { + assert.Equal(t, condition.Type, "PersistentVolumeResizing") + assert.Equal(t, condition.Status, metav1.ConditionFalse) + assert.Equal(t, condition.Reason, "Forbidden") + assert.Assert(t, cmp.Contains(condition.Message, "cannot be resized")) + } + + for _, event := range recorder.Events { + assert.Equal(t, event.Type, "Warning") + assert.Equal(t, event.Reason, "PersistentVolumeError") + assert.Assert(t, cmp.Contains(event.Note, "persistentvolumeclaim")) + assert.Assert(t, cmp.Contains(event.Note, "my-static-pvc")) + assert.Assert(t, cmp.Contains(event.Note, "only dynamic")) + assert.DeepEqual(t, event.Regarding, corev1.ObjectReference{ + APIVersion: v1beta1.GroupVersion.Identifier(), + Kind: "PGAdmin", + Namespace: "ns1", Name: "pg2", + }) + } + }) + + // Dynamically provisioned claims of storage classes that *do* + // "allowVolumeExpansion" can grow but cannot shrink. Kubernetes + // rejects such changes during PVC validation, just like static claims. 
+ // + // A future version of Kubernetes will allow `spec.resources` to shrink + // so long as it is greater than `status.capacity`. + // - https://git.k8s.io/enhancements/keps/sig-storage/1790-recover-resize-failure + t.Run("Shrink", func(t *testing.T) { + t.Cleanup(reset) + + err := apierrors.NewInvalid( + corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim").GroupKind(), + "my-static-pvc", + field.ErrorList{ + // - https://releases.k8s.io/v1.24.0/pkg/apis/core/validation/validation.go#L2188 + field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "… not be less …"), + }) + + // The PVC size is rejected. This error should become an event and condition. + assert.NilError(t, reconciler.handlePersistentVolumeClaimError(pgadmin, err)) + + assert.Check(t, len(pgadmin.Status.Conditions) > 0) + assert.Check(t, len(recorder.Events) > 0) + + for _, condition := range pgadmin.Status.Conditions { + assert.Equal(t, condition.Type, "PersistentVolumeResizing") + assert.Equal(t, condition.Status, metav1.ConditionFalse) + assert.Equal(t, condition.Reason, "Invalid") + assert.Assert(t, cmp.Contains(condition.Message, "cannot be resized")) + } + + for _, event := range recorder.Events { + assert.Equal(t, event.Type, "Warning") + assert.Equal(t, event.Reason, "PersistentVolumeError") + assert.Assert(t, cmp.Contains(event.Note, "PersistentVolumeClaim")) + assert.Assert(t, cmp.Contains(event.Note, "my-static-pvc")) + assert.Assert(t, cmp.Contains(event.Note, "not be less")) + assert.DeepEqual(t, event.Regarding, corev1.ObjectReference{ + APIVersion: v1beta1.GroupVersion.Identifier(), + Kind: "PGAdmin", + Namespace: "ns1", Name: "pg2", + }) + } + }) + }) +} diff --git a/internal/controller/standalone_pgadmin/watches.go b/internal/controller/standalone_pgadmin/watches.go new file mode 100644 index 0000000000..49ac1ebd29 --- /dev/null +++ b/internal/controller/standalone_pgadmin/watches.go @@ -0,0 +1,102 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// watchPostgresClusters returns a [handler.EventHandler] for PostgresClusters. 
+func (r *PGAdminReconciler) watchPostgresClusters() handler.Funcs { + handle := func(ctx context.Context, cluster client.Object, q workqueue.RateLimitingInterface) { + for _, pgadmin := range r.findPGAdminsForPostgresCluster(ctx, cluster) { + + q.Add(ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(pgadmin), + }) + } + } + + return handler.Funcs{ + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.ObjectNew, q) + }, + DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + } +} + +// watchForRelatedSecret handles create/update/delete events for secrets, +// passing the Secret ObjectKey to findPGAdminsForSecret +func (r *PGAdminReconciler) watchForRelatedSecret() handler.EventHandler { + handle := func(ctx context.Context, secret client.Object, q workqueue.RateLimitingInterface) { + key := client.ObjectKeyFromObject(secret) + + for _, pgadmin := range r.findPGAdminsForSecret(ctx, key) { + q.Add(ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(pgadmin), + }) + } + } + + return handler.Funcs{ + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.ObjectNew, q) + }, + // If the secret is deleted, we want to reconcile + // in order to emit an event/status about this problem. + // We will also emit a matching event/status about this problem + // when we reconcile the cluster and can't find the secret. + // That way, users will get two alerts: one when the secret is deleted + // and another when the cluster is being reconciled. + DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + } +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list} + +// findPGAdminsForSecret returns PGAdmins that have a user or users that have their password +// stored in the Secret +func (r *PGAdminReconciler) findPGAdminsForSecret( + ctx context.Context, secret client.ObjectKey, +) []*v1beta1.PGAdmin { + var matching []*v1beta1.PGAdmin + var pgadmins v1beta1.PGAdminList + + // NOTE: If this becomes slow due to a large number of PGAdmins in a single + // namespace, we can configure the [ctrl.Manager] field indexer and pass a + // [fields.Selector] here. + // - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html + if err := r.List(ctx, &pgadmins, &client.ListOptions{ + Namespace: secret.Namespace, + }); err == nil { + for i := range pgadmins.Items { + for j := range pgadmins.Items[i].Spec.Users { + if pgadmins.Items[i].Spec.Users[j].PasswordRef.LocalObjectReference.Name == secret.Name { + matching = append(matching, &pgadmins.Items[i]) + break + } + } + } + } + return matching +} diff --git a/internal/controller/standalone_pgadmin/watches_test.go b/internal/controller/standalone_pgadmin/watches_test.go new file mode 100644 index 0000000000..1419eb9efa --- /dev/null +++ b/internal/controller/standalone_pgadmin/watches_test.go @@ -0,0 +1,122 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestFindPGAdminsForSecret(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient) + reconciler := &PGAdminReconciler{Client: tClient} + + secret1 := &corev1.Secret{} + secret1.Namespace = ns.Name + secret1.Name = "first-password-secret" + + assert.NilError(t, tClient.Create(ctx, secret1)) + secretObjectKey := client.ObjectKeyFromObject(secret1) + + t.Run("NoPGAdmins", func(t *testing.T) { + pgadmins := reconciler.findPGAdminsForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(pgadmins), 0) + }) + + t.Run("OnePGAdmin", func(t *testing.T) { + pgadmin1 := new(v1beta1.PGAdmin) + pgadmin1.Namespace = ns.Name + pgadmin1.Name = "first-pgadmin" + pgadmin1.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "first-password-secret", + }, + Key: "password", + }, + Username: "testuser", + Role: "Administrator", + }, + } + assert.NilError(t, tClient.Create(ctx, pgadmin1)) + + pgadmins := reconciler.findPGAdminsForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(pgadmins), 1) + assert.Equal(t, pgadmins[0].Name, "first-pgadmin") + }) + + t.Run("TwoPGAdmins", func(t *testing.T) { + pgadmin2 := new(v1beta1.PGAdmin) + pgadmin2.Namespace = ns.Name + pgadmin2.Name = "second-pgadmin" + pgadmin2.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "first-password-secret", + }, + Key: "password", + }, + Username: "testuser2", + Role: "Administrator", + }, + } + assert.NilError(t, tClient.Create(ctx, pgadmin2)) + + pgadmins := reconciler.findPGAdminsForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(pgadmins), 2) + pgadminCount := map[string]int{} + for _, pgadmin := range pgadmins { + pgadminCount[pgadmin.Name] += 1 + } + assert.Equal(t, pgadminCount["first-pgadmin"], 1) + assert.Equal(t, pgadminCount["second-pgadmin"], 1) + }) + + t.Run("PGAdminWithDifferentSecretNameNotIncluded", func(t *testing.T) { + pgadmin3 := new(v1beta1.PGAdmin) + pgadmin3.Namespace = ns.Name + pgadmin3.Name = "third-pgadmin" + pgadmin3.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "other-password-secret", + }, + Key: "password", + }, + Username: "testuser2", + Role: "Administrator", + }, + } + assert.NilError(t, tClient.Create(ctx, pgadmin3)) + + pgadmins := reconciler.findPGAdminsForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(pgadmins), 2) + pgadminCount := map[string]int{} + for _, pgadmin := range pgadmins { + pgadminCount[pgadmin.Name] += 1 + } + assert.Equal(t, pgadminCount["first-pgadmin"], 1) + assert.Equal(t, pgadminCount["second-pgadmin"], 1) + assert.Equal(t, pgadminCount["third-pgadmin"], 0) + }) +} diff --git a/internal/feature/features.go b/internal/feature/features.go new file mode 100644 index 0000000000..db424ead42 --- /dev/null +++ b/internal/feature/features.go @@ -0,0 +1,132 @@ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +/* +Package feature provides types and functions to enable and disable features +of the Postgres Operator. + +To add a new feature, export its name as a constant string and configure it +in [NewGate]. Choose a name that is clear to end users, as they will use it +to enable or disable the feature. + +# Stages + +Each feature must be configured with a maturity called a stage. We follow the +Kubernetes convention that features in the "Alpha" stage are disabled by default, +while those in the "Beta" stage are enabled by default. + - https://docs.k8s.io/reference/command-line-tools-reference/feature-gates/#feature-stages + +NOTE: Since Kubernetes 1.24, APIs (not features) in the "Beta" stage are disabled by default: + - https://blog.k8s.io/2022/05/03/kubernetes-1-24-release-announcement/#beta-apis-off-by-default + - https://git.k8s.io/enhancements/keps/sig-architecture/3136-beta-apis-off-by-default#goals + +# Using Features + +We initialize and configure one [MutableGate] in main() and add it to the Context +passed to Reconcilers and other Runnables. Those can then interrogate it using [Enabled]: + + if !feature.Enabled(ctx, feature.Excellent) { return } + +Tests should create and configure their own [MutableGate] and inject it using +[NewContext]. For example, the following enables one feature and disables another: + + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.Excellent: true, + feature.Uncommon: false, + })) + ctx := feature.NewContext(context.Background(), gate) +*/ +package feature + +import ( + "context" + + "k8s.io/component-base/featuregate" +) + +type Feature = featuregate.Feature + +// Gate indicates what features exist and which are enabled. +type Gate interface { + Enabled(Feature) bool + String() string +} + +// MutableGate contains features that can be enabled or disabled. +type MutableGate interface { + Gate + // Set enables or disables features by parsing a string like "feature1=true,feature2=false". + Set(string) error + // SetFromMap enables or disables features by boolean values. + SetFromMap(map[string]bool) error +} + +const ( + // Support appending custom queries to default PGMonitor queries + AppendCustomQueries = "AppendCustomQueries" + + // Enables automatic creation of user schema + AutoCreateUserSchema = "AutoCreateUserSchema" + + // Support automatically growing volumes + AutoGrowVolumes = "AutoGrowVolumes" + + BridgeIdentifiers = "BridgeIdentifiers" + + // Support custom sidecars for PostgreSQL instance Pods + InstanceSidecars = "InstanceSidecars" + + // Support custom sidecars for pgBouncer Pods + PGBouncerSidecars = "PGBouncerSidecars" + + // Support tablespace volumes + TablespaceVolumes = "TablespaceVolumes" + + // Support VolumeSnapshots + VolumeSnapshots = "VolumeSnapshots" +) + +// NewGate returns a MutableGate with the Features defined in this package. 
+func NewGate() MutableGate { + gate := featuregate.NewFeatureGate() + + if err := gate.Add(map[Feature]featuregate.FeatureSpec{ + AppendCustomQueries: {Default: false, PreRelease: featuregate.Alpha}, + AutoCreateUserSchema: {Default: true, PreRelease: featuregate.Beta}, + AutoGrowVolumes: {Default: false, PreRelease: featuregate.Alpha}, + BridgeIdentifiers: {Default: false, PreRelease: featuregate.Alpha}, + InstanceSidecars: {Default: false, PreRelease: featuregate.Alpha}, + PGBouncerSidecars: {Default: false, PreRelease: featuregate.Alpha}, + TablespaceVolumes: {Default: false, PreRelease: featuregate.Alpha}, + VolumeSnapshots: {Default: false, PreRelease: featuregate.Alpha}, + }); err != nil { + panic(err) + } + + return gate +} + +type contextKey struct{} + +// Enabled indicates if a Feature is enabled in the Gate contained in ctx. It +// returns false when there is no Gate. +func Enabled(ctx context.Context, f Feature) bool { + gate, ok := ctx.Value(contextKey{}).(Gate) + return ok && gate.Enabled(f) +} + +// NewContext returns a copy of ctx containing gate. Check it using [Enabled]. +func NewContext(ctx context.Context, gate Gate) context.Context { + return context.WithValue(ctx, contextKey{}, gate) +} + +func ShowGates(ctx context.Context) string { + featuresEnabled := "" + gate, ok := ctx.Value(contextKey{}).(Gate) + if ok { + featuresEnabled = gate.String() + } + return featuresEnabled +} diff --git a/internal/feature/features_test.go b/internal/feature/features_test.go new file mode 100644 index 0000000000..f76dd216e6 --- /dev/null +++ b/internal/feature/features_test.go @@ -0,0 +1,65 @@ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package feature + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" +) + +func TestDefaults(t *testing.T) { + t.Parallel() + gate := NewGate() + + assert.Assert(t, false == gate.Enabled(AppendCustomQueries)) + assert.Assert(t, true == gate.Enabled(AutoCreateUserSchema)) + assert.Assert(t, false == gate.Enabled(AutoGrowVolumes)) + assert.Assert(t, false == gate.Enabled(BridgeIdentifiers)) + assert.Assert(t, false == gate.Enabled(InstanceSidecars)) + assert.Assert(t, false == gate.Enabled(PGBouncerSidecars)) + assert.Assert(t, false == gate.Enabled(TablespaceVolumes)) + assert.Assert(t, false == gate.Enabled(VolumeSnapshots)) + + assert.Equal(t, gate.String(), "") +} + +func TestStringFormat(t *testing.T) { + t.Parallel() + gate := NewGate() + + assert.NilError(t, gate.Set("")) + assert.NilError(t, gate.Set("TablespaceVolumes=true")) + assert.Equal(t, gate.String(), "TablespaceVolumes=true") + assert.Assert(t, true == gate.Enabled(TablespaceVolumes)) + + err := gate.Set("NotAGate=true") + assert.ErrorContains(t, err, "unrecognized feature gate") + assert.ErrorContains(t, err, "NotAGate") + + err = gate.Set("GateNotSet") + assert.ErrorContains(t, err, "missing bool") + assert.ErrorContains(t, err, "GateNotSet") + + err = gate.Set("GateNotSet=foo") + assert.ErrorContains(t, err, "invalid value") + assert.ErrorContains(t, err, "GateNotSet") +} + +func TestContext(t *testing.T) { + t.Parallel() + gate := NewGate() + ctx := NewContext(context.Background(), gate) + assert.Equal(t, ShowGates(ctx), "") + + assert.NilError(t, gate.Set("TablespaceVolumes=true")) + assert.Assert(t, true == Enabled(ctx, TablespaceVolumes)) + assert.Equal(t, ShowGates(ctx), "TablespaceVolumes=true") + + assert.NilError(t, gate.SetFromMap(map[string]bool{TablespaceVolumes: false})) + assert.Assert(t, false == 
Enabled(ctx, TablespaceVolumes)) + assert.Equal(t, ShowGates(ctx), "TablespaceVolumes=false") +} diff --git a/internal/initialize/doc.go b/internal/initialize/doc.go index 53e717cc26..aedd85846f 100644 --- a/internal/initialize/doc.go +++ b/internal/initialize/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package initialize provides functions to initialize some common fields and types. package initialize diff --git a/internal/initialize/metadata.go b/internal/initialize/metadata.go index b473fd0822..d62530736a 100644 --- a/internal/initialize/metadata.go +++ b/internal/initialize/metadata.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize diff --git a/internal/initialize/metadata_test.go b/internal/initialize/metadata_test.go index 4d91885bd5..735e455a2e 100644 --- a/internal/initialize/metadata_test.go +++ b/internal/initialize/metadata_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize_test diff --git a/internal/initialize/primitives.go b/internal/initialize/primitives.go index 75a09a1d7a..9bc264f88c 100644 --- a/internal/initialize/primitives.go +++ b/internal/initialize/primitives.go @@ -1,28 +1,20 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize // Bool returns a pointer to v. func Bool(v bool) *bool { return &v } -// ByteMap initializes m when it points to nil. -func ByteMap(m *map[string][]byte) { - if m != nil && *m == nil { - *m = make(map[string][]byte) +// FromPointer returns the value that p points to. +// When p is nil, it returns the zero value of T. +func FromPointer[T any](p *T) T { + var v T + if p != nil { + v = *p } + return v } // Int32 returns a pointer to v. @@ -31,9 +23,17 @@ func Int32(v int32) *int32 { return &v } // Int64 returns a pointer to v. func Int64(v int64) *int64 { return &v } -// StringMap initializes m when it points to nil. -func StringMap(m *map[string]string) { +// Map initializes m when it points to nil. +func Map[M ~map[K]V, K comparable, V any](m *M) { + // See https://pkg.go.dev/maps for similar type constraints. + if m != nil && *m == nil { - *m = make(map[string]string) + *m = make(M) } } + +// Pointer returns a pointer to v. +func Pointer[T any](v T) *T { return &v } + +// String returns a pointer to v. +func String(v string) *string { return &v } diff --git a/internal/initialize/primitives_test.go b/internal/initialize/primitives_test.go index e4f51e712f..e39898b4fe 100644 --- a/internal/initialize/primitives_test.go +++ b/internal/initialize/primitives_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize_test @@ -35,25 +24,32 @@ func TestBool(t *testing.T) { } } -func TestByteMap(t *testing.T) { - // Ignores nil pointer. - initialize.ByteMap(nil) - - var m map[string][]byte - - // Starts nil. - assert.Assert(t, m == nil) - - // Gets initialized. - initialize.ByteMap(&m) - assert.DeepEqual(t, m, map[string][]byte{}) - - // Now writable. - m["x"] = []byte("y") - - // Doesn't overwrite. 
- initialize.ByteMap(&m) - assert.DeepEqual(t, m, map[string][]byte{"x": []byte("y")}) +func TestFromPointer(t *testing.T) { + t.Run("bool", func(t *testing.T) { + assert.Equal(t, initialize.FromPointer((*bool)(nil)), false) + assert.Equal(t, initialize.FromPointer(initialize.Pointer(false)), false) + assert.Equal(t, initialize.FromPointer(initialize.Pointer(true)), true) + }) + + t.Run("int32", func(t *testing.T) { + assert.Equal(t, initialize.FromPointer((*int32)(nil)), int32(0)) + assert.Equal(t, initialize.FromPointer(initialize.Pointer(int32(0))), int32(0)) + assert.Equal(t, initialize.FromPointer(initialize.Pointer(int32(-99))), int32(-99)) + assert.Equal(t, initialize.FromPointer(initialize.Pointer(int32(42))), int32(42)) + }) + + t.Run("int64", func(t *testing.T) { + assert.Equal(t, initialize.FromPointer((*int64)(nil)), int64(0)) + assert.Equal(t, initialize.FromPointer(initialize.Pointer(int64(0))), int64(0)) + assert.Equal(t, initialize.FromPointer(initialize.Pointer(int64(-99))), int64(-99)) + assert.Equal(t, initialize.FromPointer(initialize.Pointer(int64(42))), int64(42)) + }) + + t.Run("string", func(t *testing.T) { + assert.Equal(t, initialize.FromPointer((*string)(nil)), "") + assert.Equal(t, initialize.FromPointer(initialize.Pointer("")), "") + assert.Equal(t, initialize.FromPointer(initialize.Pointer("sup")), "sup") + }) } func TestInt32(t *testing.T) { @@ -90,23 +86,118 @@ func TestInt64(t *testing.T) { } } -func TestStringMap(t *testing.T) { - // Ignores nil pointer. - initialize.StringMap(nil) +func TestMap(t *testing.T) { + t.Run("map[string][]byte", func(t *testing.T) { + // Ignores nil pointer. + initialize.Map((*map[string][]byte)(nil)) + + var m map[string][]byte + + // Starts nil. + assert.Assert(t, m == nil) + + // Gets initialized. + initialize.Map(&m) + assert.DeepEqual(t, m, map[string][]byte{}) + + // Now writable. + m["x"] = []byte("y") - var m map[string]string + // Doesn't overwrite. + initialize.Map(&m) + assert.DeepEqual(t, m, map[string][]byte{"x": []byte("y")}) + }) - // Starts nil. - assert.Assert(t, m == nil) + t.Run("map[string]string", func(t *testing.T) { + // Ignores nil pointer. + initialize.Map((*map[string]string)(nil)) - // Gets initialized. - initialize.StringMap(&m) - assert.DeepEqual(t, m, map[string]string{}) + var m map[string]string - // Now writable. - m["x"] = "y" + // Starts nil. + assert.Assert(t, m == nil) - // Doesn't overwrite. - initialize.StringMap(&m) - assert.DeepEqual(t, m, map[string]string{"x": "y"}) + // Gets initialized. + initialize.Map(&m) + assert.DeepEqual(t, m, map[string]string{}) + + // Now writable. + m["x"] = "y" + + // Doesn't overwrite. 
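[Editor's note] The generic helpers introduced above in `internal/initialize/primitives.go` (`Map`, `Pointer`, `FromPointer`) replace the older type-specific `ByteMap`/`StringMap` constructors. A minimal usage sketch, assuming the module-internal import path used elsewhere in this diff; it is illustrative only and not part of the changeset:

```go
package main

import (
	"fmt"

	"github.com/crunchydata/postgres-operator/internal/initialize"
)

func main() {
	// Map initializes a nil map of any key and value types in place.
	var labels map[string]string
	initialize.Map(&labels)
	labels["example"] = "value"

	// Pointer works for any type; FromPointer returns the pointed-to value,
	// or the zero value of the type when the pointer is nil.
	replicas := initialize.Pointer(int32(3))
	fmt.Println(initialize.FromPointer(replicas)) // 3

	var missing *string
	fmt.Println(initialize.FromPointer(missing) == "") // true
}
```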
+ initialize.Map(&m) + assert.DeepEqual(t, m, map[string]string{"x": "y"}) + }) +} + +func TestPointer(t *testing.T) { + t.Run("bool", func(t *testing.T) { + n := initialize.Pointer(false) + if assert.Check(t, n != nil) { + assert.Equal(t, *n, false) + } + + y := initialize.Pointer(true) + if assert.Check(t, y != nil) { + assert.Equal(t, *y, true) + } + }) + + t.Run("int32", func(t *testing.T) { + z := initialize.Pointer(int32(0)) + if assert.Check(t, z != nil) { + assert.Equal(t, *z, int32(0)) + } + + n := initialize.Pointer(int32(-99)) + if assert.Check(t, n != nil) { + assert.Equal(t, *n, int32(-99)) + } + + p := initialize.Pointer(int32(42)) + if assert.Check(t, p != nil) { + assert.Equal(t, *p, int32(42)) + } + }) + + t.Run("int64", func(t *testing.T) { + z := initialize.Pointer(int64(0)) + if assert.Check(t, z != nil) { + assert.Equal(t, *z, int64(0)) + } + + n := initialize.Pointer(int64(-99)) + if assert.Check(t, n != nil) { + assert.Equal(t, *n, int64(-99)) + } + + p := initialize.Pointer(int64(42)) + if assert.Check(t, p != nil) { + assert.Equal(t, *p, int64(42)) + } + }) + + t.Run("string", func(t *testing.T) { + z := initialize.Pointer("") + if assert.Check(t, z != nil) { + assert.Equal(t, *z, "") + } + + n := initialize.Pointer("sup") + if assert.Check(t, n != nil) { + assert.Equal(t, *n, "sup") + } + }) +} + +func TestString(t *testing.T) { + z := initialize.String("") + if assert.Check(t, z != nil) { + assert.Equal(t, *z, "") + } + + n := initialize.String("sup") + if assert.Check(t, n != nil) { + assert.Equal(t, *n, "sup") + } } diff --git a/internal/initialize/security.go b/internal/initialize/security.go index 8fc7956d6b..5dd52d7b1e 100644 --- a/internal/initialize/security.go +++ b/internal/initialize/security.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize @@ -19,12 +8,13 @@ import ( corev1 "k8s.io/api/core/v1" ) -// RestrictedPodSecurityContext returns a v1.PodSecurityContext with safe defaults. -// See https://docs.k8s.io/concepts/security/pod-security-standards/ -func RestrictedPodSecurityContext() *corev1.PodSecurityContext { +// PodSecurityContext returns a v1.PodSecurityContext with some defaults. +func PodSecurityContext() *corev1.PodSecurityContext { + onRootMismatch := corev1.FSGroupChangeOnRootMismatch return &corev1.PodSecurityContext{ - // Fail to start a container if its image runs as UID 0 (root). - RunAsNonRoot: Bool(true), + // If set to "OnRootMismatch", if the root of the volume already has + // the correct permissions, the recursive permission change can be skipped + FSGroupChangePolicy: &onRootMismatch, } } @@ -35,6 +25,13 @@ func RestrictedSecurityContext() *corev1.SecurityContext { // Prevent any container processes from gaining privileges. AllowPrivilegeEscalation: Bool(false), + // Drop any capabilities granted by the container runtime. 
+ // This must be uppercase to pass Pod Security Admission. + // - https://releases.k8s.io/v1.24.0/staging/src/k8s.io/pod-security-admission/policy/check_capabilities_restricted.go + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + // Processes in privileged containers are essentially root on the host. Privileged: Bool(false), @@ -43,5 +40,9 @@ func RestrictedSecurityContext() *corev1.SecurityContext { // Fail to start the container if its image runs as UID 0 (root). RunAsNonRoot: Bool(true), + + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, } } diff --git a/internal/initialize/security_test.go b/internal/initialize/security_test.go index bbe0ba5e0a..0a6409cf41 100644 --- a/internal/initialize/security_test.go +++ b/internal/initialize/security_test.go @@ -1,30 +1,25 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize_test import ( + "fmt" "testing" "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" "github.com/crunchydata/postgres-operator/internal/initialize" ) -func TestRestrictedPodSecurityContext(t *testing.T) { - psc := initialize.RestrictedPodSecurityContext() +func TestPodSecurityContext(t *testing.T) { + psc := initialize.PodSecurityContext() + + if assert.Check(t, psc.FSGroupChangePolicy != nil) { + assert.Equal(t, string(*psc.FSGroupChangePolicy), "OnRootMismatch") + } // Kubernetes describes recommended security profiles: // - https://docs.k8s.io/concepts/security/pod-security-standards/ @@ -35,7 +30,7 @@ func TestRestrictedPodSecurityContext(t *testing.T) { // > non-critical applications. t.Run("Baseline", func(t *testing.T) { assert.Assert(t, psc.SELinuxOptions == nil, - `Setting custom SELinux options should be disallowed.`) + `Setting a custom SELinux user or role option is forbidden.`) assert.Assert(t, psc.Sysctls == nil, `Sysctls can disable security mechanisms or affect all containers on a host, and should be disallowed except for an allowed "safe" subset.`) @@ -46,13 +41,18 @@ func TestRestrictedPodSecurityContext(t *testing.T) { // > operators and developers of security-critical applications, as well as // > lower-trust users. 
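[Editor's note] The reworked `PodSecurityContext` and `RestrictedSecurityContext` helpers above are meant to be used together so that a Pod satisfies the "restricted" Pod Security Standard. A minimal sketch of how they might be combined on a Pod spec (hypothetical example, assuming the module-internal import path; not part of the changeset):

```go
package example

import (
	corev1 "k8s.io/api/core/v1"

	"github.com/crunchydata/postgres-operator/internal/initialize"
)

// restrictedPodSpec sets the fsGroup change policy at the Pod level and the
// per-container restrictions (non-root, no privilege escalation, all
// capabilities dropped, RuntimeDefault seccomp profile) at the container level.
func restrictedPodSpec() corev1.PodSpec {
	return corev1.PodSpec{
		SecurityContext: initialize.PodSecurityContext(),
		Containers: []corev1.Container{{
			Name:            "database",
			SecurityContext: initialize.RestrictedSecurityContext(),
		}},
	}
}
```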
t.Run("Restricted", func(t *testing.T) { - if assert.Check(t, psc.RunAsNonRoot != nil) { - assert.Assert(t, *psc.RunAsNonRoot == true, - "Containers must be required to run as non-root users.") + if assert.Check(t, psc.RunAsNonRoot == nil) { + assert.Assert(t, initialize.RestrictedSecurityContext().RunAsNonRoot != nil, + `RunAsNonRoot should be delegated to the container-level v1.SecurityContext`) } - assert.Assert(t, psc.SeccompProfile == nil, - "The RuntimeDefault seccomp profile must be required, or allow specific additional profiles.") + assert.Assert(t, psc.RunAsUser == nil, + `Containers must not set runAsUser to 0`) + + if assert.Check(t, psc.SeccompProfile == nil) { + assert.Assert(t, initialize.RestrictedSecurityContext().SeccompProfile != nil, + `SeccompProfile should be delegated to the container-level v1.SecurityContext`) + } }) } @@ -72,11 +72,13 @@ func TestRestrictedSecurityContext(t *testing.T) { "Privileged Pods disable most security mechanisms and must be disallowed.") } - assert.Assert(t, sc.Capabilities == nil, - "Adding additional capabilities beyond the default set must be disallowed.") + if assert.Check(t, sc.Capabilities != nil) { + assert.Assert(t, sc.Capabilities.Add == nil, + "Adding additional capabilities … must be disallowed.") + } assert.Assert(t, sc.SELinuxOptions == nil, - "Setting custom SELinux options should be disallowed.") + "Setting a custom SELinux user or role option is forbidden.") assert.Assert(t, sc.ProcMount == nil, "The default /proc masks are set up to reduce attack surface, and should be required.") @@ -92,13 +94,26 @@ func TestRestrictedSecurityContext(t *testing.T) { "Privilege escalation (such as via set-user-ID or set-group-ID file mode) should not be allowed.") } + if assert.Check(t, sc.Capabilities != nil) { + assert.Assert(t, fmt.Sprint(sc.Capabilities.Drop) == `[ALL]`, + "Containers must drop ALL capabilities, and are only permitted to add back the NET_BIND_SERVICE capability.") + } + if assert.Check(t, sc.RunAsNonRoot != nil) { assert.Assert(t, *sc.RunAsNonRoot == true, "Containers must be required to run as non-root users.") } - assert.Assert(t, sc.SeccompProfile == nil, - "The RuntimeDefault seccomp profile must be required, or allow specific additional profiles.") + assert.Assert(t, sc.RunAsUser == nil, + `Containers must not set runAsUser to 0`) + + // NOTE: The "restricted" Security Context Constraint (SCC) of OpenShift 4.10 + // and earlier does not allow any profile to be set. The "restricted-v2" SCC + // of OpenShift 4.11 uses the "runtime/default" profile. + // - https://docs.openshift.com/container-platform/4.10/security/seccomp-profiles.html + // - https://docs.openshift.com/container-platform/4.11/security/seccomp-profiles.html + assert.Assert(t, sc.SeccompProfile.Type == corev1.SeccompProfileTypeRuntimeDefault, + `Seccomp profile must be explicitly set to one of the allowed values. Both the Unconfined profile and the absence of a profile are prohibited.`) }) if assert.Check(t, sc.ReadOnlyRootFilesystem != nil) { diff --git a/internal/kubeapi/patch.go b/internal/kubeapi/patch.go index 3e1d2ba28c..973852c17a 100644 --- a/internal/kubeapi/patch.go +++ b/internal/kubeapi/patch.go @@ -1,26 +1,15 @@ -package kubeapi - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package kubeapi import ( "strings" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" + "sigs.k8s.io/controller-runtime/pkg/client" ) // escapeJSONPointer encodes '~' and '/' according to RFC 6901. @@ -61,7 +50,6 @@ func (*JSON6902) pointer(tokens ...string) string { // > // > o If the target location specifies an object member that does exist, // > that member's value is replaced. -// func (patch *JSON6902) Add(path ...string) func(value interface{}) *JSON6902 { i := len(*patch) f := func(value interface{}) *JSON6902 { @@ -83,7 +71,6 @@ func (patch *JSON6902) Add(path ...string) func(value interface{}) *JSON6902 { // > The "remove" operation removes the value at the target location. // > // > The target location MUST exist for the operation to be successful. -// func (patch *JSON6902) Remove(path ...string) *JSON6902 { *patch = append(*patch, map[string]interface{}{ "op": "remove", @@ -99,7 +86,6 @@ func (patch *JSON6902) Remove(path ...string) *JSON6902 { // > with a new value. // > // > The target location MUST exist for the operation to be successful. -// func (patch *JSON6902) Replace(path ...string) func(value interface{}) *JSON6902 { i := len(*patch) f := func(value interface{}) *JSON6902 { @@ -120,7 +106,7 @@ func (patch *JSON6902) Replace(path ...string) func(value interface{}) *JSON6902 func (patch JSON6902) Bytes() ([]byte, error) { return patch.Data(nil) } // Data returns the JSON representation of patch. -func (patch JSON6902) Data(runtime.Object) ([]byte, error) { return json.Marshal(patch) } +func (patch JSON6902) Data(client.Object) ([]byte, error) { return json.Marshal(patch) } // IsEmpty returns true when patch has no operations. func (patch JSON6902) IsEmpty() bool { return len(patch) == 0 } @@ -144,7 +130,6 @@ func NewMergePatch() *Merge7386 { return &Merge7386{} } // > contain the member, the value is replaced. Null values in the merge // > patch are given special meaning to indicate the removal of existing // > values in the target. -// func (patch *Merge7386) Add(path ...string) func(value interface{}) *Merge7386 { position := *patch @@ -183,7 +168,7 @@ func (patch *Merge7386) Remove(path ...string) *Merge7386 { func (patch Merge7386) Bytes() ([]byte, error) { return patch.Data(nil) } // Data returns the JSON representation of patch. -func (patch Merge7386) Data(runtime.Object) ([]byte, error) { return json.Marshal(patch) } +func (patch Merge7386) Data(client.Object) ([]byte, error) { return json.Marshal(patch) } // IsEmpty returns true when patch has no modifications. func (patch Merge7386) IsEmpty() bool { return len(patch) == 0 } diff --git a/internal/kubeapi/patch_test.go b/internal/kubeapi/patch_test.go index ae8499198e..52f5787b8f 100644 --- a/internal/kubeapi/patch_test.go +++ b/internal/kubeapi/patch_test.go @@ -1,19 +1,8 @@ -package kubeapi - -/* - Copyright 2021 Crunchy Data Solutions, Inc. 
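[Editor's note] The kubeapi patch builders above now accept `client.Object` instead of `runtime.Object` in their `Data` methods. A minimal sketch of the fluent merge-patch API shown in this file (the expected output is approximate and assumes encoding/json's alphabetical key ordering; not part of the changeset):

```go
package main

import (
	"fmt"

	"github.com/crunchydata/postgres-operator/internal/kubeapi"
)

func main() {
	// Build an RFC 7386 merge patch that sets one label and removes one annotation.
	patch := kubeapi.NewMergePatch().
		Add("metadata", "labels", "example")("value").
		Remove("metadata", "annotations", "example")

	data, _ := patch.Bytes()
	fmt.Println(string(data))
	// Roughly: {"metadata":{"annotations":{"example":null},"labels":{"example":"value"}}}
}
```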
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package kubeapi import ( "encoding/json" diff --git a/internal/logging/logr.go b/internal/logging/logr.go index c9a23fb45c..c907997d40 100644 --- a/internal/logging/logr.go +++ b/internal/logging/logr.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package logging @@ -19,34 +8,31 @@ import ( "context" "github.com/go-logr/logr" - "github.com/wojas/genericr" "go.opentelemetry.io/otel/trace" ) var global = logr.Discard() -// Discard returns a logr.Logger that discards all messages logged to it. -func Discard() logr.Logger { return logr.DiscardLogger{} } +// Logger is an interface to an abstract logging implementation. +type Logger = logr.Logger -// SetLogFunc replaces the global logr.Logger with log that gets called when an -// entry's level is at or below verbosity. (Only the most important entries are -// passed when verbosity is zero.) Before this is called, the global logr.Logger -// is a no-op. -func SetLogFunc(verbosity int, log genericr.LogFunc) { - global = genericr.New(log).WithCaller(true).WithVerbosity(verbosity) -} +// Discard returns a Logger that discards all messages logged to it. +func Discard() Logger { return logr.Discard() } + +// SetLogSink replaces the global Logger with sink. Before this is called, +// the global Logger is a no-op. +func SetLogSink(sink logr.LogSink) { global = logr.New(sink) } // NewContext returns a copy of ctx containing logger. Retrieve it using FromContext. -func NewContext(ctx context.Context, logger logr.Logger) context.Context { +func NewContext(ctx context.Context, logger Logger) context.Context { return logr.NewContext(ctx, logger) } -// FromContext returns the global logr.Logger or the one stored by a prior call +// FromContext returns the global Logger or the one stored by a prior call // to NewContext. -func FromContext(ctx context.Context) logr.Logger { - var log logr.Logger - - if log = logr.FromContext(ctx); log == nil { +func FromContext(ctx context.Context) Logger { + log, err := logr.FromContext(ctx) + if err != nil { log = global } @@ -54,8 +40,58 @@ func FromContext(ctx context.Context) logr.Logger { // Omit trace flags for now because they don't seem relevant. 
// - https://github.com/open-telemetry/opentelemetry-specification/blob/v0.7.0/specification/logs/overview.md if sc := trace.SpanFromContext(ctx).SpanContext(); sc.IsValid() { - log = log.WithValues("spanid", sc.SpanID, "traceid", sc.TraceID) + log = log.WithValues("spanid", sc.SpanID(), "traceid", sc.TraceID()) } return log } + +// sink implements logr.LogSink using two function pointers. +type sink struct { + depth int + verbosity int + names []string + values []interface{} + + // TODO(cbandy): add names or frame to the functions below. + + fnError func(error, string, ...interface{}) + fnInfo func(int, string, ...interface{}) +} + +var _ logr.LogSink = (*sink)(nil) + +func (s *sink) Enabled(level int) bool { return level <= s.verbosity } +func (s *sink) Init(info logr.RuntimeInfo) { s.depth = info.CallDepth } + +func (s sink) combineValues(kv ...interface{}) []interface{} { + if len(kv) == 0 { + return s.values + } + if n := len(s.values); n > 0 { + return append(s.values[:n:n], kv...) + } + return kv +} + +func (s *sink) Error(err error, msg string, kv ...interface{}) { + s.fnError(err, msg, s.combineValues(kv...)...) +} + +func (s *sink) Info(level int, msg string, kv ...interface{}) { + s.fnInfo(level, msg, s.combineValues(kv...)...) +} + +func (s *sink) WithName(name string) logr.LogSink { + n := len(s.names) + out := *s + out.names = append(out.names[:n:n], name) + return &out +} + +func (s *sink) WithValues(kv ...interface{}) logr.LogSink { + n := len(s.values) + out := *s + out.values = append(out.values[:n:n], kv...) + return &out +} diff --git a/internal/logging/logr_test.go b/internal/logging/logr_test.go index ffe91ba189..1cbc818ad9 100644 --- a/internal/logging/logr_test.go +++ b/internal/logging/logr_test.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package logging @@ -20,24 +9,23 @@ import ( "testing" "github.com/go-logr/logr" - "github.com/wojas/genericr" - "go.opentelemetry.io/otel/oteltest" + "go.opentelemetry.io/otel/sdk/trace" "gotest.tools/v3/assert" ) func TestDiscard(t *testing.T) { - assert.Equal(t, Discard(), logr.DiscardLogger{}) + assert.Equal(t, Discard(), logr.Discard()) } func TestFromContext(t *testing.T) { - global = logr.DiscardLogger{} + global = logr.Discard() // Defaults to global. log := FromContext(context.Background()) assert.Equal(t, log, global) // Retrieves from NewContext. 
- double := struct{ logr.Logger }{logr.DiscardLogger{}} + double := logr.New(&sink{}) log = FromContext(NewContext(context.Background(), double)) assert.Equal(t, log, double) } @@ -45,8 +33,14 @@ func TestFromContext(t *testing.T) { func TestFromContextTraceContext(t *testing.T) { var calls []map[string]interface{} - SetLogFunc(0, func(input genericr.Entry) { - calls = append(calls, input.FieldsMap()) + SetLogSink(&sink{ + fnInfo: func(_ int, _ string, kv ...interface{}) { + m := make(map[string]interface{}) + for i := 0; i < len(kv); i += 2 { + m[kv[i].(string)] = kv[i+1] + } + calls = append(calls, m) + }, }) ctx := context.Background() @@ -56,20 +50,22 @@ func TestFromContextTraceContext(t *testing.T) { assert.Equal(t, calls[0]["spanid"], nil) assert.Equal(t, calls[0]["traceid"], nil) - ctx, span := oteltest.DefaultTracer().Start(ctx, "test-span") + ctx, span := trace.NewTracerProvider().Tracer("").Start(ctx, "test-span") defer span.End() // OpenTelemetry trace context when there is. FromContext(ctx).Info("") - assert.Equal(t, calls[1]["spanid"], span.SpanContext().SpanID) - assert.Equal(t, calls[1]["traceid"], span.SpanContext().TraceID) + assert.Equal(t, calls[1]["spanid"], span.SpanContext().SpanID()) + assert.Equal(t, calls[1]["traceid"], span.SpanContext().TraceID()) } -func TestSetLogFunc(t *testing.T) { +func TestSetLogSink(t *testing.T) { var calls []string - SetLogFunc(0, func(input genericr.Entry) { - calls = append(calls, input.Message) + SetLogSink(&sink{ + fnInfo: func(_ int, m string, _ ...interface{}) { + calls = append(calls, m) + }, }) global.Info("called") diff --git a/internal/logging/logrus.go b/internal/logging/logrus.go index 20cc8f902a..9683a104d1 100644 --- a/internal/logging/logrus.go +++ b/internal/logging/logrus.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package logging @@ -22,17 +11,18 @@ import ( "runtime" "strings" + "github.com/go-logr/logr" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/wojas/genericr" ) -// Logrus creates a function that writes genericr.Entry to out using a logrus -// format. The resulting logrus.Level depends on Entry.Error and Entry.Level: -// - Entry.Error ≠ nil → logrus.ErrorLevel -// - Entry.Level < debug → logrus.InfoLevel -// - Entry.Level ≥ debug → logrus.DebugLevel -func Logrus(out io.Writer, version string, debug int) genericr.LogFunc { +// Logrus creates a sink that writes to out using a logrus format. Log entries +// are emitted when their level is at or below verbosity. (Only the most +// important entries are emitted when verbosity is zero.) Error entries get a +// logrus.ErrorLevel, Info entries with verbosity less than debug get a +// logrus.InfoLevel, and Info entries with verbosity of debug or more get a +// logrus.DebugLevel. 
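[Editor's note] A minimal sketch of wiring the Logrus sink described above into the package's global logger at startup, assuming the module-internal import path; the version, debug, and verbosity values are illustrative only:

```go
package main

import (
	"context"
	"os"

	"github.com/crunchydata/postgres-operator/internal/logging"
)

func main() {
	// Entries logged at verbosity 1 or higher render at logrus DebugLevel;
	// entries above verbosity 2 are discarded by the sink entirely.
	logging.SetLogSink(logging.Logrus(os.Stdout, "v1", 1, 2))

	log := logging.FromContext(context.Background())
	log.Info("starting", "component", "example") // level=info
	log.V(1).Info("detail", "key", "value")      // level=debug
	log.V(3).Info("never emitted")               // above configured verbosity
}
```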
+func Logrus(out io.Writer, version string, debug, verbosity int) logr.LogSink { root := logrus.New() root.SetLevel(logrus.TraceLevel) @@ -45,47 +35,80 @@ func Logrus(out io.Writer, version string, debug int) genericr.LogFunc { _, module, _, _ := runtime.Caller(0) module = strings.TrimSuffix(module, "internal/logging/logrus.go") - return func(input genericr.Entry) { - entry := root.WithField("version", version) - frame := input.Caller - level := logrus.InfoLevel + return &sink{ + verbosity: verbosity, + + fnError: func(err error, message string, kv ...interface{}) { + entry := root.WithField("version", version) + entry = logrusFields(entry, kv...) - if input.Level >= debug { - level = logrus.DebugLevel - } - if len(input.Fields) != 0 { - entry = entry.WithFields(input.FieldsMap()) - } - if input.Error != nil { if v, ok := entry.Data[logrus.ErrorKey]; ok { entry.Data["fields."+logrus.ErrorKey] = v } - entry = entry.WithError(input.Error) - level = logrus.ErrorLevel + entry = entry.WithError(err) var t interface{ StackTrace() errors.StackTrace } - if errors.As(input.Error, &t) { + if errors.As(err, &t) { if st := t.StackTrace(); len(st) > 0 { - frame, _ = runtime.CallersFrames([]uintptr{uintptr(st[0])}).Next() + frame, _ := runtime.CallersFrames([]uintptr{uintptr(st[0])}).Next() + logrusFrame(entry, frame, module) } } - } - if frame.File != "" { - filename := strings.TrimPrefix(frame.File, module) - fileline := fmt.Sprintf("%s:%d", filename, frame.Line) - if v, ok := entry.Data["file"]; ok { - entry.Data["fields.file"] = v - } - entry.Data["file"] = fileline - } - if frame.Function != "" { - _, function := filepath.Split(frame.Function) - if v, ok := entry.Data["func"]; ok { - entry.Data["fields.func"] = v + entry.Log(logrus.ErrorLevel, message) + }, + + fnInfo: func(level int, message string, kv ...interface{}) { + entry := root.WithField("version", version) + entry = logrusFields(entry, kv...) + + if level >= debug { + entry.Log(logrus.DebugLevel, message) + } else { + entry.Log(logrus.InfoLevel, message) } - entry.Data["func"] = function + }, + } +} + +// logrusFields structures and adds the key/value interface to the logrus.Entry; +// for instance, if a key is not a string, this formats the key as a string. +func logrusFields(entry *logrus.Entry, kv ...interface{}) *logrus.Entry { + if len(kv) == 0 { + return entry + } + if len(kv)%2 == 1 { + kv = append(kv, nil) + } + + m := make(map[string]interface{}, len(kv)/2) + + for i := 0; i < len(kv); i += 2 { + key, ok := kv[i].(string) + if !ok { + key = fmt.Sprintf("!(%#v)", kv[i]) } + m[key] = kv[i+1] + } - entry.Log(level, input.Message) + return entry.WithFields(m) +} + +// logrusFrame adds the file and func to the logrus.Entry, +// for use in logging errors +func logrusFrame(entry *logrus.Entry, frame runtime.Frame, module string) { + if frame.File != "" { + filename := strings.TrimPrefix(frame.File, module) + fileline := fmt.Sprintf("%s:%d", filename, frame.Line) + if v, ok := entry.Data["file"]; ok { + entry.Data["fields.file"] = v + } + entry.Data["file"] = fileline + } + if frame.Function != "" { + _, function := filepath.Split(frame.Function) + if v, ok := entry.Data["func"]; ok { + entry.Data["fields.func"] = v + } + entry.Data["func"] = function } } diff --git a/internal/logging/logrus_test.go b/internal/logging/logrus_test.go index a26d36dafc..3e73193d1a 100644 --- a/internal/logging/logrus_test.go +++ b/internal/logging/logrus_test.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 Crunchy Data Solutions, Inc. 
-Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package logging @@ -24,7 +13,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/pkg/errors" - "github.com/wojas/genericr" + "gotest.tools/v3/assert" ) func assertLogrusContains(t testing.TB, actual, expected string) { @@ -39,77 +28,57 @@ func TestLogrus(t *testing.T) { t.Parallel() out := new(bytes.Buffer) - logrus := Logrus(out, "v1", 1) + logrus := Logrus(out, "v1", 1, 2) + + // Configured verbosity discards. + assert.Assert(t, logrus.Enabled(1)) + assert.Assert(t, logrus.Enabled(2)) + assert.Assert(t, !logrus.Enabled(3)) // Default level is INFO. // Version field is always present. out.Reset() - logrus(genericr.Entry{}) + logrus.Info(0, "") assertLogrusContains(t, out.String(), `level=info version=v1`) // Configured level or higher is DEBUG. out.Reset() - logrus(genericr.Entry{Level: 1}) + logrus.Info(1, "") assertLogrusContains(t, out.String(), `level=debug`) out.Reset() - logrus(genericr.Entry{Level: 2}) + logrus.Info(2, "") assertLogrusContains(t, out.String(), `level=debug`) - // Any error becomes ERROR level. + // Any error is ERROR level. out.Reset() - logrus(genericr.Entry{Error: fmt.Errorf("%s", "dang")}) + logrus.Error(fmt.Errorf("%s", "dang"), "") assertLogrusContains(t, out.String(), `level=error error=dang`) // A wrapped error includes one frame of its stack. out.Reset() _, _, baseline, _ := runtime.Caller(0) - logrus(genericr.Entry{Error: errors.New("dang")}) + logrus.Error(errors.New("dang"), "") assertLogrusContains(t, out.String(), fmt.Sprintf(`file="internal/logging/logrus_test.go:%d"`, baseline+1)) assertLogrusContains(t, out.String(), `func=logging.TestLogrus`) out.Reset() - logrus(genericr.Entry{Fields: []interface{}{"k1", "str", "k2", 13, "k3", false}}) + logrus.Info(0, "", "k1", "str", "k2", 13, "k3", false) assertLogrusContains(t, out.String(), `k1=str k2=13 k3=false`) out.Reset() - logrus(genericr.Entry{Message: "banana"}) + logrus.Info(0, "banana") assertLogrusContains(t, out.String(), `msg=banana`) // Fields don't overwrite builtins. out.Reset() - logrus(genericr.Entry{ - Message: "banana", - Error: errors.New("dang"), - Fields: []interface{}{ - "error", "not-err", - "file", "not-file", - "func", "not-func", - "level", "not-lvl", - "msg", "not-msg", - }, - }) + logrus.Error(errors.New("dang"), "banana", + "error", "not-err", + "file", "not-file", + "func", "not-func", + "level", "not-lvl", + "msg", "not-msg", + ) assertLogrusContains(t, out.String(), `level=error msg=banana error=dang`) assertLogrusContains(t, out.String(), `fields.error=not-err fields.file=not-file fields.func=not-func`) assertLogrusContains(t, out.String(), `fields.level=not-lvl fields.msg=not-msg`) } - -func TestLogrusCaller(t *testing.T) { - t.Parallel() - - out := new(bytes.Buffer) - log := genericr.New(Logrus(out, "v2", 2)).WithCaller(true) - - // Details come from the line of the logr.Logger call. 
- _, _, baseline, _ := runtime.Caller(0) - log.Info("") - assertLogrusContains(t, out.String(), fmt.Sprintf(`file="internal/logging/logrus_test.go:%d"`, baseline+1)) - assertLogrusContains(t, out.String(), `func=logging.TestLogrusCaller`) - - // Fields don't overwrite builtins. - out.Reset() - _, _, baseline, _ = runtime.Caller(0) - log.Info("", "file", "not-file", "func", "not-func") - assertLogrusContains(t, out.String(), fmt.Sprintf(`file="internal/logging/logrus_test.go:%d"`, baseline+1)) - assertLogrusContains(t, out.String(), `func=logging.TestLogrusCaller`) - assertLogrusContains(t, out.String(), `fields.file=not-file fields.func=not-func`) -} diff --git a/internal/naming/annotations.go b/internal/naming/annotations.go index 069eca4f2d..2179a5f084 100644 --- a/internal/naming/annotations.go +++ b/internal/naming/annotations.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -21,6 +10,10 @@ const ( // Finalizer marks an object to be garbage collected by this module. Finalizer = annotationPrefix + "finalizer" + // PatroniSwitchover is the annotation added to a PostgresCluster to initiate a manual + // Patroni Switchover (or Failover). + PatroniSwitchover = annotationPrefix + "trigger-switchover" + // PGBackRestBackup is the annotation that is added to a PostgresCluster to initiate a manual // backup. The value of the annotation will be a unique identifier for a backup Job (e.g. a // timestamp), which will be stored in the PostgresCluster status to properly track completion @@ -28,22 +21,51 @@ const ( // ID associated with a specific manual backup Job. PGBackRestBackup = annotationPrefix + "pgbackrest-backup" + // PGBackRestBackupJobCompletion is the annotation that is added to restore jobs, pvcs, and + // VolumeSnapshots that are involved in the volume snapshot creation process. The annotation + // holds a RFC3339 formatted timestamp that corresponds to the completion time of the associated + // backup job. + PGBackRestBackupJobCompletion = annotationPrefix + "pgbackrest-backup-job-completion" + // PGBackRestConfigHash is an annotation used to specify the hash value associated with a // repo configuration as needed to detect configuration changes that invalidate running Jobs // (and therefore must be recreated) PGBackRestConfigHash = annotationPrefix + "pgbackrest-hash" - // PGBackRestCurrentConfig is an annotation used to indicate the name of the pgBackRest - // configuration associated with a specific Job as determined by either the current primary - // (if no dedicated repository host is enabled), or the dedicated repository host. This helps - // in detecting pgBackRest backup Jobs that no longer mount the proper pgBackRest - // configuration, e.g. because a failover has occurred, or because dedicated repo host has been - // enabled or disabled. 
- PGBackRestCurrentConfig = annotationPrefix + "pgbackrest-config" - // PGBackRestRestore is the annotation that is added to a PostgresCluster to initiate an in-place - // restore. The value of the annotation will be a unique identfier for a restore Job (e.g. a + // restore. The value of the annotation will be a unique identifier for a restore Job (e.g. a // timestamp), which will be stored in the PostgresCluster status to properly track completion // of the Job. PGBackRestRestore = annotationPrefix + "pgbackrest-restore" + + // PGBackRestIPVersion is an annotation used to indicate whether an IPv6 wildcard address should be + // used for the pgBackRest "tls-server-address" or not. If the user wants to use IPv6, the value + // should be "IPv6". As of right now, if the annotation is not present or if the annotation's value + // is anything other than "IPv6", the "tls-server-address" will default to IPv4 (0.0.0.0). The need + // for this annotation is due to an issue in pgBackRest (#1841) where using a wildcard address to + // bind all addresses does not work in certain IPv6 environments. + PGBackRestIPVersion = annotationPrefix + "pgbackrest-ip-version" + + // PostgresExporterCollectorsAnnotation is an annotation used to allow users to control whether or + // not postgres_exporter default metrics, settings, and collectors are enabled. The value "None" + // disables all postgres_exporter defaults. Disabling the defaults may cause errors in dashboards. + PostgresExporterCollectorsAnnotation = annotationPrefix + "postgres-exporter-collectors" + + // CrunchyBridgeClusterAdoptionAnnotation is an annotation used to allow users to "adopt" or take + // control over an existing Bridge Cluster with a CrunchyBridgeCluster CR. Essentially, if a + // CrunchyBridgeCluster CR does not have a status.ID, but the name matches the name of an existing + // bridge cluster, the user must add this annotation to the CR to allow the CR to take control of + // the Bridge Cluster. The Value assigned to the annotation must be the ID of existing cluster. + CrunchyBridgeClusterAdoptionAnnotation = annotationPrefix + "adopt-bridge-cluster" + + // AutoCreateUserSchemaAnnotation is an annotation used to allow users to control whether the cluster + // has schemas automatically created for the users defined in `spec.users` for all of the databases + // listed for that user. + AutoCreateUserSchemaAnnotation = annotationPrefix + "autoCreateUserSchema" + + // AuthorizeBackupRemovalAnnotation is an annotation used to allow users + // to delete PVC-based backups when changing from a cluster with backups + // to a cluster without backups. As usual with the operator, we do not + // touch cloud-based backups. + AuthorizeBackupRemovalAnnotation = annotationPrefix + "authorizeBackupRemoval" ) diff --git a/internal/naming/annotations_test.go b/internal/naming/annotations_test.go index 92824b976f..318dd5ab5c 100644 --- a/internal/naming/annotations_test.go +++ b/internal/naming/annotations_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -23,9 +12,15 @@ import ( ) func TestAnnotationsValid(t *testing.T) { + assert.Assert(t, nil == validation.IsQualifiedName(AuthorizeBackupRemovalAnnotation)) + assert.Assert(t, nil == validation.IsQualifiedName(AutoCreateUserSchemaAnnotation)) + assert.Assert(t, nil == validation.IsQualifiedName(CrunchyBridgeClusterAdoptionAnnotation)) assert.Assert(t, nil == validation.IsQualifiedName(Finalizer)) + assert.Assert(t, nil == validation.IsQualifiedName(PatroniSwitchover)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestBackup)) + assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestBackupJobCompletion)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestConfigHash)) - assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestCurrentConfig)) + assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestIPVersion)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestRestore)) + assert.Assert(t, nil == validation.IsQualifiedName(PostgresExporterCollectorsAnnotation)) } diff --git a/internal/naming/controllers.go b/internal/naming/controllers.go new file mode 100644 index 0000000000..3d492e8a3a --- /dev/null +++ b/internal/naming/controllers.go @@ -0,0 +1,10 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package naming + +const ( + ControllerBridge = "bridge-controller" + ControllerPGAdmin = "pgadmin-controller" +) diff --git a/internal/naming/dns.go b/internal/naming/dns.go index 6d2f2e3a6d..d3351a5d70 100644 --- a/internal/naming/dns.go +++ b/internal/naming/dns.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -44,6 +33,26 @@ func InstancePodDNSNames(ctx context.Context, instance *appsv1.StatefulSet) []st } } +// RepoHostPodDNSNames returns the possible DNS names for a pgBackRest repository host Pod. +// The first name is the fully qualified domain name (FQDN). +func RepoHostPodDNSNames(ctx context.Context, repoHost *appsv1.StatefulSet) []string { + var ( + domain = KubernetesClusterDomain(ctx) + namespace = repoHost.Namespace + name = repoHost.Name + "-0." + repoHost.Spec.ServiceName + ) + + // We configure our repository hosts with a subdomain so that Pods get stable + // DNS names in the form "{pod}.{service}.{namespace}.svc.{cluster-domain}". + // - https://docs.k8s.io/concepts/services-networking/dns-pod-service/#pods + return []string{ + name + "." + namespace + ".svc." + domain, + name + "." + namespace + ".svc", + name + "." + namespace, + name, + } +} + // ServiceDNSNames returns the possible DNS names for service. The first name // is the fully qualified domain name (FQDN). 
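[Editor's note] A minimal sketch of calling the new `RepoHostPodDNSNames` helper above; the StatefulSet uses hypothetical names and only populates the fields the function reads (not part of the changeset):

```go
package main

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/crunchydata/postgres-operator/internal/naming"
)

func main() {
	repoHost := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "hippo-repo-host", Namespace: "postgres-operator"},
		Spec:       appsv1.StatefulSetSpec{ServiceName: "hippo-pods"},
	}

	// The first entry is the FQDN, e.g.
	// "hippo-repo-host-0.hippo-pods.postgres-operator.svc.<cluster-domain>";
	// the remaining entries are progressively shorter forms of the same name.
	for _, name := range naming.RepoHostPodDNSNames(context.Background(), repoHost) {
		fmt.Println(name)
	}
}
```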
func ServiceDNSNames(ctx context.Context, service *corev1.Service) []string { diff --git a/internal/naming/dns_test.go b/internal/naming/dns_test.go index f3ab664975..e7e2ea9dc6 100644 --- a/internal/naming/dns_test.go +++ b/internal/naming/dns_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming diff --git a/internal/naming/doc.go b/internal/naming/doc.go index 858efed6f4..72cab8b0b0 100644 --- a/internal/naming/doc.go +++ b/internal/naming/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package naming provides functions and constants for the postgres-operator // naming and labeling scheme. diff --git a/internal/naming/labels.go b/internal/naming/labels.go index b92c7e21f3..f25993122b 100644 --- a/internal/naming/labels.go +++ b/internal/naming/labels.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -36,13 +25,28 @@ const ( // LabelClusterCertificate is used to identify a secret containing a cluster certificate LabelClusterCertificate = labelPrefix + "cluster-certificate" + // LabelData is used to identify Pods and Volumes store Postgres data. + LabelData = labelPrefix + "data" + + // LabelMoveJob is used to identify a directory move Job. + LabelMoveJob = labelPrefix + "move-job" + + // LabelMovePGBackRestRepoDir is used to identify the Job that moves an existing pgBackRest repo directory. + LabelMovePGBackRestRepoDir = labelPrefix + "move-pgbackrest-repo-dir" + + // LabelMovePGDataDir is used to identify the Job that moves an existing pgData directory. 
+ LabelMovePGDataDir = labelPrefix + "move-pgdata-dir" + + // LabelMovePGWalDir is used to identify the Job that moves an existing pg_wal directory. + LabelMovePGWalDir = labelPrefix + "move-pgwal-dir" + // LabelPGBackRest is used to indicate that a resource is for pgBackRest LabelPGBackRest = labelPrefix + "pgbackrest" // LabelPGBackRestBackup is used to indicate that a resource is for a pgBackRest backup LabelPGBackRestBackup = labelPrefix + "pgbackrest-backup" - // LabelPGBackRestConfig is used to indicate that a ConfigMap is for pgBackRest + // LabelPGBackRestConfig is used to indicate that a ConfigMap or Secret is for pgBackRest LabelPGBackRestConfig = labelPrefix + "pgbackrest-config" // LabelPGBackRestDedicated is used to indicate that a ConfigMap is for a pgBackRest dedicated @@ -90,6 +94,9 @@ const ( // RolePGBouncer is the LabelRole applied to PgBouncer objects. RolePGBouncer = "pgbouncer" + // RolePGAdmin is the LabelRole applied to pgAdmin objects. + RolePGAdmin = "pgadmin" + // RolePostgresData is the LabelRole applied to PostgreSQL data volumes. RolePostgresData = "pgdata" @@ -101,6 +108,28 @@ const ( // RoleMonitoring is the LabelRole applied to Monitoring resources RoleMonitoring = "monitoring" + + // RoleSnapshot is the LabelRole applied to Snapshot resources. + RoleSnapshot = "snapshot" +) + +const ( + // LabelCrunchyBridgeClusterPostgresRole identifies the PostgreSQL user an object is for or about. + LabelCrunchyBridgeClusterPostgresRole = labelPrefix + "cbc-pgrole" + + // RoleCrunchyBridgeClusterPostgresRole is the LabelRole applied to CBC PostgreSQL role secrets. + RoleCrunchyBridgeClusterPostgresRole = "cbc-pgrole" +) + +const ( + // DataPGAdmin is a LabelData value that indicates the object has pgAdmin data. + DataPGAdmin = "pgadmin" + + // DataPGBackRest is a LabelData value that indicates the object has pgBackRest data. + DataPGBackRest = "pgbackrest" + + // DataPostgres is a LabelData value that indicates the object has PostgreSQL data. + DataPostgres = "postgres" ) // BackupJobType represents different types of backups (e.g. ad-hoc backups, scheduled backups, @@ -114,6 +143,15 @@ const ( // BackupReplicaCreate is the backup type for the backup taken to enable pgBackRest replica // creation BackupReplicaCreate BackupJobType = "replica-create" + + // BackupScheduled is the backup type utilized for scheduled backups + BackupScheduled BackupJobType = "scheduled" +) + +const ( + + // LabelStandalonePGAdmin is used to indicate a resource for a standalone-pgadmin instance. + LabelStandalonePGAdmin = labelPrefix + "pgadmin" ) // Merge takes sets of labels and merges them. The last set @@ -126,6 +164,15 @@ func Merge(sets ...map[string]string) labels.Set { return merged } +// DirectoryMoveJobLabels provides labels for PVC move Jobs. +func DirectoryMoveJobLabels(clusterName string) labels.Set { + jobLabels := map[string]string{ + LabelCluster: clusterName, + LabelMoveJob: "", + } + return jobLabels +} + // PGBackRestLabels provides common labels for pgBackRest resources. func PGBackRestLabels(clusterName string) labels.Set { return map[string]string{ @@ -154,6 +201,9 @@ func PGBackRestBackupJobSelector(clusterName, repoName string, // PGBackRestRestoreConfigLabels provides labels for configuration (e.g. ConfigMaps and Secrets) // generated to perform a pgBackRest restore. +// +// Deprecated: Store restore data in the pgBackRest ConfigMap and Secret, +// [PGBackRestConfig] and [PGBackRestSecret]. 
func PGBackRestRestoreConfigLabels(clusterName string) labels.Set { commonLabels := PGBackRestLabels(clusterName) jobLabels := map[string]string{ @@ -168,7 +218,8 @@ func PGBackRestRestoreConfigSelector(clusterName string) labels.Selector { return PGBackRestRestoreConfigLabels(clusterName).AsSelector() } -// PGBackRestRestoreJobLabels provides labels for pgBackRest restore Jobs. +// PGBackRestRestoreJobLabels provides labels for pgBackRest restore Jobs and +// associated configuration ConfigMaps and Secrets. func PGBackRestRestoreJobLabels(clusterName string) labels.Set { commonLabels := PGBackRestLabels(clusterName) jobLabels := map[string]string{ @@ -214,6 +265,7 @@ func PGBackRestCronJobLabels(clusterName, repoName, backupType string) labels.Se cronJobLabels := map[string]string{ LabelPGBackRestRepo: repoName, LabelPGBackRestCronJob: backupType, + LabelPGBackRestBackup: string(BackupScheduled), } return labels.Merge(commonLabels, cronJobLabels) } @@ -238,6 +290,37 @@ func PGBackRestRepoVolumeLabels(clusterName, repoName string) labels.Set { repoLabels := PGBackRestRepoLabels(clusterName, repoName) repoVolLabels := map[string]string{ LabelPGBackRestRepoVolume: "", + LabelData: DataPGBackRest, } return labels.Merge(repoLabels, repoVolLabels) } + +// StandalonePGAdminLabels return labels for standalone pgAdmin resources +func StandalonePGAdminLabels(pgAdminName string) labels.Set { + return map[string]string{ + LabelStandalonePGAdmin: pgAdminName, + LabelRole: RolePGAdmin, + } +} + +// StandalonePGAdminSelector provides a selector for standalone pgAdmin resources +func StandalonePGAdminSelector(pgAdminName string) labels.Selector { + return StandalonePGAdminLabels(pgAdminName).AsSelector() +} + +// StandalonePGAdminDataLabels returns the labels for standalone pgAdmin resources +// that contain or mount data +func StandalonePGAdminDataLabels(pgAdminName string) labels.Set { + return labels.Merge( + StandalonePGAdminLabels(pgAdminName), + map[string]string{ + LabelData: DataPGAdmin, + }, + ) +} + +// StandalonePGAdminDataSelector returns a selector for standalone pgAdmin resources +// that contain or mount data +func StandalonePGAdminDataSelector(pgAdmiName string) labels.Selector { + return StandalonePGAdminDataLabels(pgAdmiName).AsSelector() +} diff --git a/internal/naming/labels_test.go b/internal/naming/labels_test.go index 857368f745..b8a7779858 100644 --- a/internal/naming/labels_test.go +++ b/internal/naming/labels_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -25,8 +14,13 @@ import ( func TestLabelsValid(t *testing.T) { assert.Assert(t, nil == validation.IsQualifiedName(LabelCluster)) + assert.Assert(t, nil == validation.IsQualifiedName(LabelData)) assert.Assert(t, nil == validation.IsQualifiedName(LabelInstance)) assert.Assert(t, nil == validation.IsQualifiedName(LabelInstanceSet)) + assert.Assert(t, nil == validation.IsQualifiedName(LabelMoveJob)) + assert.Assert(t, nil == validation.IsQualifiedName(LabelMovePGBackRestRepoDir)) + assert.Assert(t, nil == validation.IsQualifiedName(LabelMovePGDataDir)) + assert.Assert(t, nil == validation.IsQualifiedName(LabelMovePGWalDir)) assert.Assert(t, nil == validation.IsQualifiedName(LabelPatroni)) assert.Assert(t, nil == validation.IsQualifiedName(LabelRole)) assert.Assert(t, nil == validation.IsQualifiedName(LabelPGBackRest)) @@ -39,20 +33,29 @@ func TestLabelsValid(t *testing.T) { assert.Assert(t, nil == validation.IsQualifiedName(LabelPGBackRestRestoreConfig)) assert.Assert(t, nil == validation.IsQualifiedName(LabelPGMonitorDiscovery)) assert.Assert(t, nil == validation.IsQualifiedName(LabelPostgresUser)) + assert.Assert(t, nil == validation.IsQualifiedName(LabelStandalonePGAdmin)) assert.Assert(t, nil == validation.IsQualifiedName(LabelStartupInstance)) + assert.Assert(t, nil == validation.IsQualifiedName(LabelCrunchyBridgeClusterPostgresRole)) } func TestLabelValuesValid(t *testing.T) { + assert.Assert(t, nil == validation.IsValidLabelValue(DataPGAdmin)) + assert.Assert(t, nil == validation.IsValidLabelValue(DataPGBackRest)) + assert.Assert(t, nil == validation.IsValidLabelValue(DataPostgres)) assert.Assert(t, nil == validation.IsValidLabelValue(RolePatroniLeader)) assert.Assert(t, nil == validation.IsValidLabelValue(RolePatroniReplica)) + assert.Assert(t, nil == validation.IsValidLabelValue(RolePGAdmin)) assert.Assert(t, nil == validation.IsValidLabelValue(RolePGBouncer)) assert.Assert(t, nil == validation.IsValidLabelValue(RolePostgresData)) assert.Assert(t, nil == validation.IsValidLabelValue(RolePostgresUser)) assert.Assert(t, nil == validation.IsValidLabelValue(RolePostgresWAL)) assert.Assert(t, nil == validation.IsValidLabelValue(RolePrimary)) assert.Assert(t, nil == validation.IsValidLabelValue(RoleReplica)) + assert.Assert(t, nil == validation.IsValidLabelValue(string(BackupManual))) assert.Assert(t, nil == validation.IsValidLabelValue(string(BackupReplicaCreate))) + assert.Assert(t, nil == validation.IsValidLabelValue(string(BackupScheduled))) assert.Assert(t, nil == validation.IsValidLabelValue(RoleMonitoring)) + assert.Assert(t, nil == validation.IsValidLabelValue(RoleCrunchyBridgeClusterPostgresRole)) } func TestMerge(t *testing.T) { @@ -181,6 +184,7 @@ func TestPGBackRestLabelFuncs(t *testing.T) { assert.Equal(t, pgBackRestCronJobLabels.Get(LabelCluster), clusterName) assert.Check(t, pgBackRestCronJobLabels.Has(LabelPGBackRest)) assert.Equal(t, pgBackRestCronJobLabels.Get(LabelPGBackRestRepo), repoName) + assert.Equal(t, pgBackRestCronJobLabels.Get(LabelPGBackRestBackup), string(BackupScheduled)) // verify the labels that identify pgBackRest dedicated repository host resources pgBackRestDedicatedLabels := PGBackRestDedicatedLabels(clusterName) @@ -214,3 +218,14 @@ func TestPGBackRestLabelFuncs(t *testing.T) { pgBackRestRestoreConfigSelector := PGBackRestRestoreConfigSelector(clusterName) assert.Check(t, pgBackRestRestoreConfigSelector.Matches(pgBackRestRestoreConfigLabels)) } + +// validate the DirectoryMoveJobLabels 
function +func TestMoveJobLabelFunc(t *testing.T) { + + clusterName := "hippo" + + // verify the labels that identify directory move jobs + dirMoveJobLabels := DirectoryMoveJobLabels(clusterName) + assert.Equal(t, dirMoveJobLabels.Get(LabelCluster), clusterName) + assert.Check(t, dirMoveJobLabels.Has(LabelMoveJob)) +} diff --git a/internal/naming/limitations.md b/internal/naming/limitations.md new file mode 100644 index 0000000000..ba607215f7 --- /dev/null +++ b/internal/naming/limitations.md @@ -0,0 +1,105 @@ + + +# Definitions + +[k8s-names]: https://docs.k8s.io/concepts/overview/working-with-objects/names/ + +### DNS subdomain + +Most resource types require this kind of name. It must be 253 characters or less, +lowercase, and alphanumeric with hyphens U+002D and dots U+002E allowed in between. + +- [k8s.io/apimachinery/pkg/util/validation.IsDNS1123Subdomain](https://pkg.go.dev/k8s.io/apimachinery/pkg/util/validation#IsDNS1123Subdomain) + +### DNS label + +Some resource types require this kind of name. It must be 63 characters or less, +lowercase, and alphanumeric with hyphens U+002D allowed in between. + +Some have a stricter requirement to start with an alphabetic (nonnumerical) character. + +- [k8s.io/apimachinery/pkg/util/validation.IsDNS1123Label](https://pkg.go.dev/k8s.io/apimachinery/pkg/util/validation#IsDNS1123Label) +- [k8s.io/apimachinery/pkg/util/validation.IsDNS1035Label](https://pkg.go.dev/k8s.io/apimachinery/pkg/util/validation#IsDNS1035Label) + + +# Labels + +[k8s-labels]: https://docs.k8s.io/concepts/overview/working-with-objects/labels/ + +Label names must be 317 characters or less. The portion before an optional slash U+002F +must be a DNS subdomain. The portion after must be 63 characters or less. + +Label values must be 63 characters or less and can be empty. + +Both label names and values must be alphanumeric with hyphens U+002D, underscores U+005F, +and dots U+002E allowed in between. + +- [k8s.io/apimachinerypkg/util/validation.IsQualifiedName](https://pkg.go.dev/k8s.io/apimachinery/pkg/util/validation#IsQualifiedName) +- [k8s.io/apimachinerypkg/util/validation.IsValidLabelValue](https://pkg.go.dev/k8s.io/apimachinery/pkg/util/validation#IsValidLabelValue) + + +# Annotations + +[k8s-annotations]: https://docs.k8s.io/concepts/overview/working-with-objects/annotations/ + +Annotation names must be 317 characters or less. The portion before an optional slash U+002F +must be a DNS subdomain. The portion after must be 63 characters or less and alphanumeric with +hyphens U+002D, underscores U+005F, and dots U+002E allowed in between. + +Annotation values may contain anything, but the combined size of *all* names and values +must be 256 KiB or less. + +- [https://pkg.go.dev/k8s.io/apimachinery/pkg/api/validation.ValidateAnnotations](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/validation#ValidateAnnotations) + + +# Specifics + +The Kubernetes API validates custom resource metadata. +[Custom resource names are DNS subdomains](https://releases.k8s.io/v1.23.0/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/validator.go#L60). +It may be possible to limit this further through validation. This is a stated +goal of [CEL expression validation](https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules). + +[ConfigMap names are DNS subdomains](https://releases.k8s.io/v1.23.0/pkg/apis/core/validation/validation.go#L5618). 
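The limits catalogued in this file map directly onto validators in `k8s.io/apimachinery/pkg/util/validation`; each returns a slice of messages that is empty when the input is acceptable. A short sketch exercising the validators referenced above:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// Label keys: optional DNS-subdomain prefix, "/", then 63 characters or less.
	fmt.Println(validation.IsQualifiedName("postgres-operator.crunchydata.com/cluster"))

	// Label values: 63 characters or less, possibly empty.
	fmt.Println(validation.IsValidLabelValue("scheduled"))

	// Most resource names: DNS subdomain, 253 characters or less.
	fmt.Println(validation.IsDNS1123Subdomain("hippo-pgbackrest-repo-1"))

	// Stricter names (Services, Namespaces): DNS label, 63 characters or less;
	// IsDNS1035Label additionally requires a leading letter.
	fmt.Println(validation.IsDNS1035Label("hippo-primary"))

	// Each call prints an empty slice ("[]") when the name is valid.
}
```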
+ +[CronJob names are DNS subdomains](https://docs.k8s.io/concepts/workloads/controllers/cron-jobs/) +but must be [52 characters or less](https://releases.k8s.io/v1.23.0/pkg/apis/batch/validation/validation.go#L281). + +[Deployment names are DNS subdomains](https://releases.k8s.io/v1.23.0/pkg/apis/apps/validation/validation.go#L632). + +[Job names are DNS subdomains](https://releases.k8s.io/v1.23.0/pkg/apis/batch/validation/validation.go#L86). +When `.spec.completionMode = Indexed`, the name must be shorter (closer to 61 characters, it depends). +When `.spec.manualSelector` is unset, its Pods get (and must have) a "job-name" label, limiting the +name to 63 characters or less. + +[Namespace names are DNS labels](https://releases.k8s.io/v1.23.0/pkg/apis/core/validation/validation.go#L5963). + +[PersistentVolumeClaim (PVC) names are DNS subdomains](https://releases.k8s.io/v1.23.0/pkg/apis/core/validation/validation.go#L2066). + +[Pod names are DNS subdomains](https://releases.k8s.io/v1.23.0/pkg/apis/core/validation/validation.go#L3443). +The strategy for [generating Pod names](https://releases.k8s.io/v1.23.0/pkg/registry/core/pod/strategy.go#L62) truncates to 63 characters. +The `.spec.hostname` field must be 63 characters or less. + +PodDisruptionBudget (PDB) + +[ReplicaSet names are DNS subdomains](https://releases.k8s.io/v1.23.0/pkg/apis/apps/validation/validation.go#L655). + +Role + +RoleBinding + +[Secret names are DNS subdomains](https://releases.k8s.io/v1.23.0/pkg/apis/core/validation/validation.go#L5515). + +[Service names are DNS labels](https://docs.k8s.io/concepts/services-networking/service/) +that must begin with a letter. + +ServiceAccount (subdomain) + +[StatefulSet names are DNS subdomains](https://docs.k8s.io/concepts/workloads/controllers/statefulset/), +but its Pods get [hostnames](https://releases.k8s.io/v1.23.0/pkg/apis/core/validation/validation.go#L3561) +so it must be shorter (closer to 61 characters, it depends). Its Pods also get a "controller-revision-hash" +label with [11 characters appended](https://issue.k8s.io/64023), limiting the name to 52 characters or less. + diff --git a/internal/naming/names.go b/internal/naming/names.go index bf7da84d52..369591de91 100644 --- a/internal/naming/names.go +++ b/internal/naming/names.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -24,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/util/rand" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -32,6 +22,16 @@ const ( // supporting tools: Patroni, pgBackRest, etc. ContainerDatabase = "database" + // ContainerPGAdmin is the name of a container running pgAdmin. 
+ ContainerPGAdmin = "pgadmin" + + // ContainerPGAdminStartup is the name of the initialization container + // that prepares the filesystem for pgAdmin. + ContainerPGAdminStartup = "pgadmin-startup" + + // ContainerPGBackRestConfig is the name of a container supporting pgBackRest. + ContainerPGBackRestConfig = "pgbackrest-config" + // ContainerPGBouncer is the name of a container running PgBouncer. ContainerPGBouncer = "pgbouncer" // ContainerPGBouncerConfig is the name of a container supporting PgBouncer. @@ -41,9 +41,6 @@ const ( // that prepares the filesystem for PostgreSQL. ContainerPostgresStartup = "postgres-startup" - // ContainerClientCertInit is the name of the initialization container that is responsible - // for copying and setting proper permissions on the client certificate and key - ContainerClientCertInit = ContainerDatabase + "-client-cert-init" // ContainerClientCertCopy is the name of the container that is responsible for copying and // setting proper permissions on the client certificate and key after initialization whenever // there is a change in the certificates or key @@ -52,13 +49,29 @@ const ( // for the nss_wrapper ContainerNSSWrapperInit = "nss-wrapper-init" + // ContainerPGBackRestLogDirInit is the name of the init container utilized to make + // a pgBackRest log directory when using a dedicated repo host. + ContainerPGBackRestLogDirInit = "pgbackrest-log-dir" + // ContainerPGMonitorExporter is the name of a container running postgres_exporter ContainerPGMonitorExporter = "exporter" + + // ContainerJobMovePGDataDir is the name of the job container utilized to copy v4 Operator + // pgData directories to the v5 default location + ContainerJobMovePGDataDir = "pgdata-move-job" + // ContainerJobMovePGWALDir is the name of the job container utilized to copy v4 Operator + // pg_wal directories to the v5 default location + ContainerJobMovePGWALDir = "pgwal-move-job" + // ContainerJobMovePGBackRestRepoDir is the name of the job container utilized to copy v4 + // Operator pgBackRest repo directories to the v5 default location + ContainerJobMovePGBackRestRepoDir = "repo-move-job" ) const ( // PortExporter is the named port for the "exporter" container PortExporter = "exporter" + // PortPGAdmin is the name of a port that connects to pgAdmin. + PortPGAdmin = "pgadmin" // PortPGBouncer is the name of a port that connects to PgBouncer. PortPGBouncer = "pgbouncer" // PortPostgreSQL is the name of a port that connects to PostgreSQL. @@ -118,8 +131,7 @@ const ( ) const ( - // PGBackRestRepoContainerName is the name assigned to the container used to run pgBackRest and - // SSH + // PGBackRestRepoContainerName is the name assigned to the container used to run pgBackRest PGBackRestRepoContainerName = "pgbackrest" // PGBackRestRestoreContainerName is the name assigned to the container used to run pgBackRest @@ -129,8 +141,13 @@ const ( // PGBackRestRepoName is the name used for a pgbackrest repository PGBackRestRepoName = "%s-pgbackrest-repo-%s" - // PGBackRestSSHVolume is the name the SSH volume used when configuring SSH in a pgBackRest Pod - PGBackRestSSHVolume = "ssh" + // PGBackRestPGDataLogPath is the pgBackRest default log path configuration used by the + // PostgreSQL instance. + PGBackRestPGDataLogPath = "/pgdata/pgbackrest/log" + + // PGBackRestRepoLogPath is the pgBackRest default log path configuration used by the + // dedicated repo host, if configured. + PGBackRestRepoLogPath = "/pgbackrest/%s/log" // suffix used with postgrescluster name for associated configmap. 
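`PortPGAdmin` and the other entries in that block are port *names*, which must satisfy the IANA service-name rules (15 characters or less, lowercase alphanumerics and hyphens, at least one letter) so Services and probes can refer to them. A small sketch of attaching such a name to a container port; the port number here is only an illustration, not the value the operator uses:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// 5050 is an illustrative number; only the name "pgadmin" comes from the diff.
	port := corev1.ContainerPort{Name: "pgadmin", ContainerPort: 5050}

	// IsValidPortName enforces the IANA_SVC_NAME rules that apply to
	// named ports on Pods and Services; it prints "[]" when the name is valid.
	fmt.Println(validation.IsValidPortName(port.Name))
}
```

Named ports let a Service `targetPort` refer to `pgadmin` regardless of the underlying number.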
// for instance, if the cluster is named 'mycluster', the @@ -140,12 +157,22 @@ const ( // suffix used with postgrescluster name for associated configmap. // for instance, if the cluster is named 'mycluster', the // configmap will be named 'mycluster-ssh-config' + // Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. + // TODO(tjmoore4): Once we no longer need this for cleanup purposes, this should be removed. sshCMNameSuffix = "%s-ssh-config" // suffix used with postgrescluster name for associated secret. // for instance, if the cluster is named 'mycluster', the // secret will be named 'mycluster-ssh' + // Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. + // TODO(tjmoore4): Once we no longer need this for cleanup purposes, this should be removed. sshSecretNameSuffix = "%s-ssh" + + // RestoreConfigCopySuffix is the suffix used for ConfigMap or Secret configuration + // resources needed when restoring from a PostgresCluster data source. If, for + // example, a Secret is named 'mysecret' and is the first item in the configuration + // slice, the copied Secret will be named 'mysecret-restorecopy-0' + RestoreConfigCopySuffix = "%s-restorecopy-%d" ) // AsObjectKey converts the ObjectMeta API type to a client.ObjectKey. @@ -172,8 +199,18 @@ func ClusterInstanceRBAC(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { } } +// ClusterPGAdmin returns the ObjectMeta necessary to lookup the ConfigMap, +// Service, StatefulSet, or Volume for the cluster's pgAdmin user interface. +func ClusterPGAdmin(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: cluster.Name + "-pgadmin", + } +} + // ClusterPGBouncer returns the ObjectMeta necessary to lookup the ConfigMap, -// Deployment, Secret, or Service that is cluster's PgBouncer proxy. +// Deployment, Secret, PodDisruptionBudget or Service that is cluster's +// PgBouncer proxy. func ClusterPGBouncer(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { return metav1.ObjectMeta{ Namespace: cluster.Namespace, @@ -212,6 +249,24 @@ func ClusterReplicaService(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { } } +// ClusterDedicatedSnapshotVolume returns the ObjectMeta for the dedicated Snapshot +// volume for a cluster. +func ClusterDedicatedSnapshotVolume(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.GetNamespace(), + Name: cluster.GetName() + "-snapshot", + } +} + +// ClusterVolumeSnapshot returns the ObjectMeta, including a random name, for a +// new pgdata VolumeSnapshot. +func ClusterVolumeSnapshot(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: cluster.Name + "-pgdata-snapshot-" + rand.String(4), + } +} + // GenerateInstance returns a random name for a member of cluster and set. func GenerateInstance( cluster *v1beta1.PostgresCluster, set *v1beta1.PostgresInstanceSetSpec, @@ -258,6 +313,16 @@ func InstanceCertificates(instance metav1.Object) metav1.ObjectMeta { } } +// InstanceSet returns the ObjectMeta necessary to lookup the objects +// associated with a single instance set. 
Includes PodDisruptionBudgets +func InstanceSet(cluster *v1beta1.PostgresCluster, + set *v1beta1.PostgresInstanceSetSpec) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Name: cluster.Name + "-set-" + set.Name, + Namespace: cluster.Namespace, + } +} + // InstancePostgresDataVolume returns the ObjectMeta for the PostgreSQL data // volume for instance. func InstancePostgresDataVolume(instance *appsv1.StatefulSet) metav1.ObjectMeta { @@ -267,6 +332,17 @@ func InstancePostgresDataVolume(instance *appsv1.StatefulSet) metav1.ObjectMeta } } +// InstanceTablespaceDataVolume returns the ObjectMeta for the tablespace data +// volume for instance. +func InstanceTablespaceDataVolume(instance *appsv1.StatefulSet, tablespaceName string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: instance.GetNamespace(), + Name: instance.GetName() + + "-" + tablespaceName + + "-tablespace", + } +} + // InstancePostgresWALVolume returns the ObjectMeta for the PostgreSQL WAL // volume for instance. func InstancePostgresWALVolume(instance *appsv1.StatefulSet) metav1.ObjectMeta { @@ -285,6 +361,35 @@ func MonitoringUserSecret(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { } } +// ExporterWebConfigMap returns ObjectMeta necessary to lookup and create the +// exporter web configmap. This configmap is used to configure the exporter +// web server. +func ExporterWebConfigMap(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: cluster.Name + "-exporter-web-config", + } +} + +// ExporterQueriesConfigMap returns ObjectMeta necessary to lookup and create the +// exporter queries configmap. This configmap is used to pass the default queries +// to the exporter. +func ExporterQueriesConfigMap(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: cluster.Name + "-exporter-queries-config", + } +} + +// OperatorConfigurationSecret returns the ObjectMeta necessary to lookup the +// Secret containing PGO configuration. +func OperatorConfigurationSecret() metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: config.PGONamespace(), + Name: "pgo-config", + } +} + // ReplicationClientCertSecret returns ObjectMeta necessary to lookup the Secret // containing the Patroni client authentication certificate information. func ReplicationClientCertSecret(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { @@ -360,7 +465,7 @@ func PGBackRestBackupJob(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { func PGBackRestCronJob(cluster *v1beta1.PostgresCluster, backuptype, repoName string) metav1.ObjectMeta { return metav1.ObjectMeta{ Namespace: cluster.GetNamespace(), - Name: cluster.Name + "-pgbackrest-" + repoName + "-" + backuptype, + Name: cluster.Name + "-" + repoName + "-" + backuptype, } } @@ -391,6 +496,8 @@ func PGBackRestRepoVolume(cluster *v1beta1.PostgresCluster, } // PGBackRestSSHConfig returns the ObjectMeta for a pgBackRest SSHD ConfigMap +// Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. +// TODO(tjmoore4): Once we no longer need this for cleanup purposes, this should be removed. 
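These helpers return only `metav1.ObjectMeta`; callers embed it in the typed object they want and use it to read or write that object. A sketch of the usual consumption pattern with a controller-runtime client, where the function name and the choice of ConfigMap are placeholders:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// fetchConfigMap sketches how callers typically consume these ObjectMeta
// helpers: copy the metadata into a typed object, then Get it by key.
func fetchConfigMap(ctx context.Context, c client.Client, meta metav1.ObjectMeta) (*corev1.ConfigMap, error) {
	cm := &corev1.ConfigMap{ObjectMeta: meta}

	// ObjectKeyFromObject reads the namespace and name supplied by the helper.
	err := c.Get(ctx, client.ObjectKeyFromObject(cm), cm)
	return cm, err
}
```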
func PGBackRestSSHConfig(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { return metav1.ObjectMeta{ Name: fmt.Sprintf(sshCMNameSuffix, cluster.GetName()), @@ -399,6 +506,8 @@ func PGBackRestSSHConfig(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { } // PGBackRestSSHSecret returns the ObjectMeta for a pgBackRest SSHD Secret +// Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. +// TODO(tjmoore4): Once we no longer need this for cleanup purposes, this should be removed. func PGBackRestSSHSecret(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { return metav1.ObjectMeta{ Name: fmt.Sprintf(sshSecretNameSuffix, cluster.GetName()), @@ -406,6 +515,14 @@ func PGBackRestSSHSecret(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { } } +// PGBackRestSecret returns the ObjectMeta for a pgBackRest Secret +func PGBackRestSecret(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Name: cluster.GetName() + "-pgbackrest", + Namespace: cluster.GetNamespace(), + } +} + // DeprecatedPostgresUserSecret returns the ObjectMeta necessary to lookup the // old Secret containing the default Postgres user and connection information. // Use PostgresUserSecret instead. @@ -433,3 +550,44 @@ func PostgresTLSSecret(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { Name: cluster.Name + "-cluster-cert", } } + +// MovePGDataDirJob returns the ObjectMeta for a pgData directory move Job +func MovePGDataDirJob(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.GetNamespace(), + Name: cluster.Name + "-move-pgdata-dir", + } +} + +// MovePGWALDirJob returns the ObjectMeta for a pg_wal directory move Job +func MovePGWALDirJob(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.GetNamespace(), + Name: cluster.Name + "-move-pgwal-dir", + } +} + +// MovePGBackRestRepoDirJob returns the ObjectMeta for a pgBackRest repo directory move Job +func MovePGBackRestRepoDirJob(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.GetNamespace(), + Name: cluster.Name + "-move-pgbackrest-repo-dir", + } +} + +// StandalonePGAdmin returns the ObjectMeta necessary to lookup the ConfigMap, +// Service, StatefulSet, or Volume for the cluster's pgAdmin user interface. +func StandalonePGAdmin(pgadmin *v1beta1.PGAdmin) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: pgadmin.Namespace, + Name: fmt.Sprintf("pgadmin-%s", pgadmin.UID), + } +} + +// UpgradeCheckConfigMap returns the ObjectMeta for the PGO ConfigMap +func UpgradeCheckConfigMap() metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: config.PGONamespace(), + Name: "pgo-upgrade-check", + } +} diff --git a/internal/naming/names_test.go b/internal/naming/names_test.go index 809582b8fe..27835c3e5d 100644 --- a/internal/naming/names_test.go +++ b/internal/naming/names_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
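Two naming strategies appear in this file: `StandalonePGAdmin` derives a stable name from the resource UID, while `ClusterVolumeSnapshot` appends a short random suffix with `rand.String`. A sketch of both, with illustrative values only:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// A UID-derived name: "pgadmin-" plus a 36-character UID stays well
	// under the 63-character DNS label limit. The UID here is made up.
	uid := "3f8a0a7e-1111-2222-3333-444455556666"
	fmt.Println(validation.IsDNS1123Label(fmt.Sprintf("pgadmin-%s", uid))) // []

	// A random-suffix name, in the style of ClusterVolumeSnapshot.
	meta := metav1.ObjectMeta{
		Namespace: "postgres-operator",
		Name:      "hippo-pgdata-snapshot-" + rand.String(4),
	}
	fmt.Println(meta.Name)
}
```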
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -45,6 +34,10 @@ func TestContainerNamesUniqueAndValid(t *testing.T) { for _, name := range []string{ ContainerDatabase, ContainerNSSWrapperInit, + ContainerPGAdmin, + ContainerPGAdminStartup, + ContainerPGBackRestConfig, + ContainerPGBackRestLogDirInit, ContainerPGBouncer, ContainerPGBouncerConfig, ContainerPostgresStartup, @@ -63,14 +56,17 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { }, } repoName := "hippo-repo" + instanceSet := &v1beta1.PostgresInstanceSetSpec{ + Name: "set-1", + } type test struct { name string value metav1.ObjectMeta } - testUniqueAndValid := func(t *testing.T, tests []test) sets.String { - names := sets.NewString() + testUniqueAndValid := func(t *testing.T, tests []test) sets.Set[string] { + names := sets.Set[string]{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { assert.Equal(t, tt.value.Namespace, cluster.Namespace) @@ -86,6 +82,7 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { t.Run("ConfigMaps", func(t *testing.T) { testUniqueAndValid(t, []test{ {"ClusterConfigMap", ClusterConfigMap(cluster)}, + {"ClusterPGAdmin", ClusterPGAdmin(cluster)}, {"ClusterPGBouncer", ClusterPGBouncer(cluster)}, {"PatroniDistributedConfiguration", PatroniDistributedConfiguration(cluster)}, {"PatroniLeaderConfigMap", PatroniLeaderConfigMap(cluster)}, @@ -117,6 +114,13 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { }) }) + t.Run("PodDisruptionBudgets", func(t *testing.T) { + testUniqueAndValid(t, []test{ + {"InstanceSetPDB", InstanceSet(cluster, instanceSet)}, + {"PGBouncerPDB", ClusterPGBouncer(cluster)}, + }) + }) + t.Run("RoleBindings", func(t *testing.T) { testUniqueAndValid(t, []test{ {"ClusterInstanceRBAC", ClusterInstanceRBAC(cluster)}, @@ -141,6 +145,13 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { {"MonitoringUserSecret", MonitoringUserSecret(cluster)}, }) + // NOTE: This does not fail when a conflict is introduced. When adding a + // Secret, be sure to compare it to the function below. 
+ t.Run("OperatorConfiguration", func(t *testing.T) { + other := OperatorConfigurationSecret().Name + assert.Assert(t, !names.Has(other), "%q defined already", other) + }) + t.Run("PostgresUserSecret", func(t *testing.T) { value := PostgresUserSecret(cluster, "some-user") @@ -148,7 +159,7 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { assert.Assert(t, nil == validation.IsDNS1123Label(value.Name)) prefix := PostgresUserSecret(cluster, "").Name - for _, name := range names.List() { + for _, name := range sets.List(names) { assert.Assert(t, !strings.HasPrefix(name, prefix), "%q may collide", name) } }) @@ -164,6 +175,7 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { t.Run("Services", func(t *testing.T) { testUniqueAndValid(t, []test{ {"ClusterPGBouncer", ClusterPGBouncer(cluster)}, + {"ClusterPGAdmin", ClusterPGAdmin(cluster)}, {"ClusterPodService", ClusterPodService(cluster)}, {"ClusterPrimaryService", ClusterPrimaryService(cluster)}, {"ClusterReplicaService", ClusterReplicaService(cluster)}, @@ -174,11 +186,24 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { }) }) + t.Run("StatefulSets", func(t *testing.T) { + testUniqueAndValid(t, []test{ + {"ClusterPGAdmin", ClusterPGAdmin(cluster)}, + }) + }) + t.Run("Volumes", func(t *testing.T) { testUniqueAndValid(t, []test{ + {"ClusterPGAdmin", ClusterPGAdmin(cluster)}, {"PGBackRestRepoVolume", PGBackRestRepoVolume(cluster, repoName)}, }) }) + + t.Run("VolumeSnapshots", func(t *testing.T) { + testUniqueAndValid(t, []test{ + {"ClusterVolumeSnapshot", ClusterVolumeSnapshot(cluster)}, + }) + }) } func TestInstanceNamesUniqueAndValid(t *testing.T) { @@ -274,3 +299,29 @@ func TestGenerateStartupInstance(t *testing.T) { assert.DeepEqual(t, instanceOne, instanceTwo) } + +func TestOperatorConfigurationSecret(t *testing.T) { + t.Setenv("PGO_NAMESPACE", "cheese") + + value := OperatorConfigurationSecret() + assert.Equal(t, value.Namespace, "cheese") + assert.Assert(t, nil == validation.IsDNS1123Label(value.Name)) +} + +func TestPortNamesUniqueAndValid(t *testing.T) { + // Port names have to be unique within a Pod. The number of ports we employ + // should be few enough that we can name them uniquely across all pods. + // - https://docs.k8s.io/reference/kubernetes-api/workload-resources/pod-v1/#ports + + names := sets.NewString() + for _, name := range []string{ + PortExporter, + PortPGAdmin, + PortPGBouncer, + PortPostgreSQL, + } { + assert.Assert(t, !names.Has(name), "%q defined already", name) + assert.Assert(t, nil == validation.IsValidPortName(name)) + names.Insert(name) + } +} diff --git a/internal/naming/selectors.go b/internal/naming/selectors.go index 8070d4c5dd..94dbc3a9fa 100644 --- a/internal/naming/selectors.go +++ b/internal/naming/selectors.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -46,6 +35,45 @@ func Cluster(cluster string) metav1.LabelSelector { } } +// ClusterRestoreJobs selects all existing restore jobs in a cluster. +func ClusterRestoreJobs(cluster string) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: cluster, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: LabelPGBackRestRestore, Operator: metav1.LabelSelectorOpExists}, + }, + } +} + +// ClusterBackupJobs selects things for all existing backup jobs in cluster. +func ClusterBackupJobs(cluster string) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: cluster, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: LabelPGBackRestBackup, Operator: metav1.LabelSelectorOpExists}, + }, + } +} + +// ClusterDataForPostgresAndPGBackRest selects things for PostgreSQL data and +// things for pgBackRest data. +func ClusterDataForPostgresAndPGBackRest(cluster string) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: cluster, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: LabelData, + Operator: metav1.LabelSelectorOpIn, + Values: []string{DataPostgres, DataPGBackRest}, + }}, + } +} + // ClusterInstance selects things for a single instance in a cluster. func ClusterInstance(cluster, instance string) metav1.LabelSelector { return metav1.LabelSelector{ @@ -78,6 +106,18 @@ func ClusterInstanceSet(cluster, set string) metav1.LabelSelector { } } +// ClusterInstanceSets selects things for sets in a cluster. +func ClusterInstanceSets(cluster string) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: cluster, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: LabelInstanceSet, Operator: metav1.LabelSelectorOpExists}, + }, + } +} + // ClusterPatronis selects things labeled for Patroni in cluster. func ClusterPatronis(cluster *v1beta1.PostgresCluster) metav1.LabelSelector { return metav1.LabelSelector{ @@ -88,6 +128,16 @@ func ClusterPatronis(cluster *v1beta1.PostgresCluster) metav1.LabelSelector { } } +// ClusterPGBouncerSelector selects things labeled for PGBouncer in cluster. +func ClusterPGBouncerSelector(cluster *v1beta1.PostgresCluster) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: cluster.Name, + LabelRole: RolePGBouncer, + }, + } +} + // ClusterPostgresUsers selects things labeled for PostgreSQL users in cluster. func ClusterPostgresUsers(cluster string) metav1.LabelSelector { return metav1.LabelSelector{ @@ -102,9 +152,13 @@ func ClusterPostgresUsers(cluster string) metav1.LabelSelector { } } -// ClusterPrimary selects things for the Primary PostgreSQL instance. -func ClusterPrimary(cluster string) metav1.LabelSelector { - s := ClusterInstances(cluster) - s.MatchLabels[LabelRole] = RolePatroniLeader - return s +// CrunchyBridgeClusterPostgresRoles selects things labeled for CrunchyBridgeCluster +// PostgreSQL roles in cluster. 
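Each selector helper in this file returns a `metav1.LabelSelector`; the `Exists` and `In` requirements flatten into the selector strings that the tests below assert against. A sketch of that conversion using the upstream helper (the package's own `AsSelector` presumably wraps something similar):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	selector := metav1.LabelSelector{
		MatchLabels: map[string]string{
			"postgres-operator.crunchydata.com/cluster": "hippo",
		},
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "postgres-operator.crunchydata.com/pgbackrest-backup",
			Operator: metav1.LabelSelectorOpExists,
		}},
	}

	// LabelSelectorAsSelector flattens both match sections into one
	// labels.Selector; Exists requirements render as a bare key.
	s, err := metav1.LabelSelectorAsSelector(&selector)
	fmt.Println(s, err)
	// Roughly: postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/pgbackrest-backup <nil>
}
```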
+func CrunchyBridgeClusterPostgresRoles(clusterName string) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: clusterName, + LabelRole: RoleCrunchyBridgeClusterPostgresRole, + }, + } } diff --git a/internal/naming/selectors_test.go b/internal/naming/selectors_test.go index ff15f0e389..1f5f42ad96 100644 --- a/internal/naming/selectors_test.go +++ b/internal/naming/selectors_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -40,7 +29,31 @@ func TestCluster(t *testing.T) { }, ",")) _, err = AsSelector(Cluster("--whoa/yikes")) - assert.ErrorContains(t, err, "invalid") + assert.ErrorContains(t, err, "Invalid") +} + +func TestClusterBackupJobs(t *testing.T) { + s, err := AsSelector(ClusterBackupJobs("something")) + assert.NilError(t, err) + assert.DeepEqual(t, s.String(), strings.Join([]string{ + "postgres-operator.crunchydata.com/cluster=something", + "postgres-operator.crunchydata.com/pgbackrest-backup", + }, ",")) + + _, err = AsSelector(Cluster("--whoa/yikes")) + assert.ErrorContains(t, err, "Invalid") +} + +func TestClusterDataForPostgresAndPGBackRest(t *testing.T) { + s, err := AsSelector(ClusterDataForPostgresAndPGBackRest("something")) + assert.NilError(t, err) + assert.DeepEqual(t, s.String(), strings.Join([]string{ + "postgres-operator.crunchydata.com/cluster=something", + "postgres-operator.crunchydata.com/data in (pgbackrest,postgres)", + }, ",")) + + _, err = AsSelector(ClusterDataForPostgresAndPGBackRest("--whoa/yikes")) + assert.ErrorContains(t, err, "Invalid") } func TestClusterInstance(t *testing.T) { @@ -52,7 +65,7 @@ func TestClusterInstance(t *testing.T) { }, ",")) _, err = AsSelector(ClusterInstance("--whoa/son", "--whoa/yikes")) - assert.ErrorContains(t, err, "invalid") + assert.ErrorContains(t, err, "Invalid") } func TestClusterInstances(t *testing.T) { @@ -64,7 +77,7 @@ func TestClusterInstances(t *testing.T) { }, ",")) _, err = AsSelector(ClusterInstances("--whoa/yikes")) - assert.ErrorContains(t, err, "invalid") + assert.ErrorContains(t, err, "Invalid") } func TestClusterInstanceSet(t *testing.T) { @@ -76,7 +89,19 @@ func TestClusterInstanceSet(t *testing.T) { }, ",")) _, err = AsSelector(ClusterInstanceSet("--whoa/yikes", "ok")) - assert.ErrorContains(t, err, "invalid") + assert.ErrorContains(t, err, "Invalid") +} + +func TestClusterInstanceSets(t *testing.T) { + s, err := AsSelector(ClusterInstanceSets("something")) + assert.NilError(t, err) + assert.DeepEqual(t, s.String(), strings.Join([]string{ + "postgres-operator.crunchydata.com/cluster=something", + "postgres-operator.crunchydata.com/instance-set", + }, ",")) + + _, err = AsSelector(ClusterInstanceSets("--whoa/yikes")) + assert.ErrorContains(t, err, "Invalid") } func TestClusterPatronis(t *testing.T) { @@ -92,7 +117,23 @@ func TestClusterPatronis(t *testing.T) { cluster.Name = 
"--nope--" _, err = AsSelector(ClusterPatronis(cluster)) - assert.ErrorContains(t, err, "invalid") + assert.ErrorContains(t, err, "Invalid") +} + +func TestClusterPGBouncerSelector(t *testing.T) { + cluster := &v1beta1.PostgresCluster{} + cluster.Name = "something" + + s, err := AsSelector(ClusterPGBouncerSelector(cluster)) + assert.NilError(t, err) + assert.DeepEqual(t, s.String(), strings.Join([]string{ + "postgres-operator.crunchydata.com/cluster=something", + "postgres-operator.crunchydata.com/role=pgbouncer", + }, ",")) + + cluster.Name = "--bad--dog" + _, err = AsSelector(ClusterPGBouncerSelector(cluster)) + assert.ErrorContains(t, err, "Invalid") } func TestClusterPostgresUsers(t *testing.T) { @@ -104,15 +145,17 @@ func TestClusterPostgresUsers(t *testing.T) { }, ",")) _, err = AsSelector(ClusterPostgresUsers("--nope--")) - assert.ErrorContains(t, err, "invalid") + assert.ErrorContains(t, err, "Invalid") } -func TestClusterPrimary(t *testing.T) { - s, err := AsSelector(ClusterPrimary("something")) +func TestCrunchyBridgeClusterPostgresRoles(t *testing.T) { + s, err := AsSelector(CrunchyBridgeClusterPostgresRoles("something")) assert.NilError(t, err) assert.DeepEqual(t, s.String(), strings.Join([]string{ "postgres-operator.crunchydata.com/cluster=something", - "postgres-operator.crunchydata.com/instance", - "postgres-operator.crunchydata.com/role=master", + "postgres-operator.crunchydata.com/role=cbc-pgrole", }, ",")) + + _, err = AsSelector(CrunchyBridgeClusterPostgresRoles("--nope--")) + assert.ErrorContains(t, err, "Invalid") } diff --git a/internal/naming/telemetry.go b/internal/naming/telemetry.go index 4a3d33ad30..5825d6299f 100644 --- a/internal/naming/telemetry.go +++ b/internal/naming/telemetry.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming diff --git a/internal/patroni/api.go b/internal/patroni/api.go index 35c7c14225..679da5f4af 100644 --- a/internal/patroni/api.go +++ b/internal/patroni/api.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -19,6 +8,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "io" "strings" @@ -33,7 +23,7 @@ type API interface { ChangePrimaryAndWait(ctx context.Context, current, next string) (bool, error) // ReplaceConfiguration replaces Patroni's entire dynamic configuration. - ReplaceConfiguration(ctx context.Context, configuration map[string]interface{}) error + ReplaceConfiguration(ctx context.Context, configuration map[string]any) error } // Executor implements API by calling "patronictl". @@ -47,7 +37,7 @@ var _ API = Executor(nil) // ChangePrimaryAndWait tries to demote the current Patroni leader by calling // "patronictl". It returns true when an election completes successfully. It // waits up to two "loop_wait" or until an error occurs. When Patroni is paused, -// next cannot be blank. +// next cannot be blank. Similar to the "POST /switchover" REST endpoint. func (exec Executor) ChangePrimaryAndWait( ctx context.Context, current, next string, ) (bool, error) { @@ -67,13 +57,76 @@ func (exec Executor) ChangePrimaryAndWait( // HTTP API. It exits zero even when the API says switchover did not occur. // Check for the text that indicates success. // - https://github.com/zalando/patroni/blob/v2.0.2/patroni/api.py#L351-L367 + // - https://github.com/zalando/patroni/blob/v2.1.1/patroni/api.py#L461-L477 + return strings.Contains(stdout.String(), "switched over"), err +} + +// SwitchoverAndWait tries to change the current Patroni leader by calling +// "patronictl". It returns true when an election completes successfully. It +// waits up to two "loop_wait" or until an error occurs. When Patroni is paused, +// next cannot be blank. Similar to the "POST /switchover" REST endpoint. +// The "patronictl switchover" variant does not require the current master to be passed +// as a flag. +func (exec Executor) SwitchoverAndWait( + ctx context.Context, target string, +) (bool, error) { + var stdout, stderr bytes.Buffer + + err := exec(ctx, nil, &stdout, &stderr, + "patronictl", "switchover", "--scheduled=now", "--force", + "--candidate="+target) + + log := logging.FromContext(ctx) + log.V(1).Info("changed primary", + "stdout", stdout.String(), + "stderr", stderr.String(), + ) + + // The command exits zero when it is able to communicate with the Patroni + // HTTP API. It exits zero even when the API says switchover did not occur. + // Check for the text that indicates success. + // - https://github.com/zalando/patroni/blob/v2.0.2/patroni/api.py#L351-L367 + // Patroni has an edge case where it could switchover to an instance other + // than the requested candidate. In this case, stdout will contain + // "Switched over" instead of "switched over" and return false, nil return strings.Contains(stdout.String(), "switched over"), err } +// FailoverAndWait tries to change the current Patroni leader by calling +// "patronictl". It returns true when an election completes successfully. It +// waits up to two "loop_wait" or until an error occurs. When Patroni is paused, +// next cannot be blank. Similar to the "POST /switchover" REST endpoint. +// The "patronictl failover" variant does not require the current master to be passed +// as a flag. 
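Both switchover variants share the same shape: run `patronictl`, capture stdout, and treat a zero exit code as "the API answered" rather than "the operation happened", so success is detected from the output text. A self-contained sketch that mirrors that logic with a local copy of the `Executor` function type; the lowercase names are illustrative stand-ins, not the package's exports:

```go
package example

import (
	"bytes"
	"context"
	"io"
	"strings"
)

// executor mirrors the patroni.Executor function type: a callback that runs
// a command and wires up its standard streams.
type executor func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error

// switchoverAndWait mirrors the success check in SwitchoverAndWait: the exit
// code only says the Patroni API was reachable, so the stdout text decides
// whether a switchover actually happened.
func switchoverAndWait(ctx context.Context, exec executor, candidate string) (bool, error) {
	var stdout, stderr bytes.Buffer

	err := exec(ctx, nil, &stdout, &stderr,
		"patronictl", "switchover", "--scheduled=now", "--force", "--candidate="+candidate)

	return strings.Contains(stdout.String(), "switched over"), err
}
```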
+func (exec Executor) FailoverAndWait( + ctx context.Context, target string, +) (bool, error) { + var stdout, stderr bytes.Buffer + + err := exec(ctx, nil, &stdout, &stderr, + "patronictl", "failover", "--force", + "--candidate="+target) + + log := logging.FromContext(ctx) + log.V(1).Info("changed primary", + "stdout", stdout.String(), + "stderr", stderr.String(), + ) + + // The command exits zero when it is able to communicate with the Patroni + // HTTP API. It exits zero even when the API says failover did not occur. + // Check for the text that indicates success. + // - https://github.com/zalando/patroni/blob/v2.0.2/patroni/api.py#L351-L367 + // Patroni has an edge case where it could failover to an instance other + // than the requested candidate. In this case, stdout will contain "Failed over" + // instead of "failed over" and return false, nil + return strings.Contains(stdout.String(), "failed over"), err +} + // ReplaceConfiguration replaces Patroni's entire dynamic configuration by -// calling "patronictl". +// calling "patronictl". Similar to the "POST /switchover" REST endpoint. func (exec Executor) ReplaceConfiguration( - ctx context.Context, configuration map[string]interface{}, + ctx context.Context, configuration map[string]any, ) error { var stdin, stdout, stderr bytes.Buffer @@ -91,3 +144,65 @@ func (exec Executor) ReplaceConfiguration( return err } + +// RestartPendingMembers looks up Patroni members with role in scope and restarts +// those that have a pending restart. +func (exec Executor) RestartPendingMembers(ctx context.Context, role, scope string) error { + var stdout, stderr bytes.Buffer + + // The following exits zero when it is able to read the DCS and communicate + // with the Patroni HTTP API. It prints the result of calling "POST /restart" + // on each member found with the desired role. The "Failed … 503 … restart + // conditions are not satisfied" message is normal and means that a particular + // member has already restarted. + // - https://github.com/zalando/patroni/blob/v2.1.1/patroni/ctl.py#L580-L596 + err := exec(ctx, nil, &stdout, &stderr, + "patronictl", "restart", "--pending", "--force", "--role="+role, scope) + + log := logging.FromContext(ctx) + log.V(1).Info("restarted members", + "stdout", stdout.String(), + "stderr", stderr.String(), + ) + + return err +} + +// GetTimeline gets the patronictl status and returns the timeline, +// currently the only information required by PGO. +// Returns zero if it runs into errors or cannot find a running Leader pod +// to get the up-to-date timeline from. +func (exec Executor) GetTimeline(ctx context.Context) (int64, error) { + var stdout, stderr bytes.Buffer + + // The following exits zero when it is able to read the DCS and communicate + // with the Patroni HTTP API. 
It prints the result of calling "GET /cluster" + // - https://github.com/zalando/patroni/blob/v2.1.1/patroni/ctl.py#L849 + err := exec(ctx, nil, &stdout, &stderr, + "patronictl", "list", "--format", "json") + if err != nil { + return 0, err + } + + if stderr.String() != "" { + return 0, errors.New(stderr.String()) + } + + var members []struct { + Role string `json:"Role"` + State string `json:"State"` + Timeline int64 `json:"TL"` + } + err = json.Unmarshal(stdout.Bytes(), &members) + if err != nil { + return 0, err + } + + for _, member := range members { + if member.Role == "Leader" && member.State == "running" { + return member.Timeline, nil + } + } + + return 0, err +} diff --git a/internal/patroni/api_test.go b/internal/patroni/api_test.go index 2194675f39..1603d2fc75 100644 --- a/internal/patroni/api_test.go +++ b/internal/patroni/api_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -32,7 +21,7 @@ func ExampleExecutor_execCmd() { _ = Executor(func( ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - // #nosec G204 Executor only calls `patronictl`. + // #nosec G204 Nothing calls the function defined in this example. cmd := exec.CommandContext(ctx, command[0], command[1:]...) 
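`GetTimeline` only needs three fields from the `patronictl list --format json` output, and it reports zero unless a running leader is present. A standalone sketch of that decoding, using member data shaped like the fixtures in the tests below:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Output shaped like "patronictl list --format json"; only the fields
	// the operator cares about are decoded.
	raw := []byte(`[
		{"Cluster": "hippo-ha", "Member": "hippo-instance1-67mc-0", "Role": "Leader",  "State": "running", "TL": 4},
		{"Cluster": "hippo-ha", "Member": "hippo-instance1-ltcf-0", "Role": "Replica", "State": "running", "TL": 4, "Lag in MB": 0}
	]`)

	var members []struct {
		Role     string `json:"Role"`
		State    string `json:"State"`
		Timeline int64  `json:"TL"`
	}
	if err := json.Unmarshal(raw, &members); err != nil {
		panic(err)
	}

	// Report the timeline of the running leader, defaulting to zero.
	var timeline int64
	for _, m := range members {
		if m.Role == "Leader" && m.State == "running" {
			timeline = m.Timeline
		}
	}
	fmt.Println(timeline) // 4
}
```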
cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr return cmd.Run() @@ -91,6 +80,110 @@ func TestExecutorChangePrimaryAndWait(t *testing.T) { }) } +func TestExecutorSwitchoverAndWait(t *testing.T) { + t.Run("Arguments", func(t *testing.T) { + called := false + exec := func( + _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + called = true + assert.DeepEqual(t, command, strings.Fields( + `patronictl switchover --scheduled=now --force --candidate=new`, + )) + assert.Assert(t, stdin == nil, "expected no stdin, got %T", stdin) + assert.Assert(t, stderr != nil, "should capture stderr") + assert.Assert(t, stdout != nil, "should capture stdout") + return nil + } + + _, _ = Executor(exec).SwitchoverAndWait(context.Background(), "new") + assert.Assert(t, called) + }) + + t.Run("Error", func(t *testing.T) { + expected := errors.New("bang") + _, actual := Executor(func( + context.Context, io.Reader, io.Writer, io.Writer, ...string, + ) error { + return expected + }).SwitchoverAndWait(context.Background(), "next") + + assert.Equal(t, expected, actual) + }) + + t.Run("Result", func(t *testing.T) { + success, _ := Executor(func( + _ context.Context, _ io.Reader, stdout, _ io.Writer, _ ...string, + ) error { + _, _ = stdout.Write([]byte(`no luck`)) + return nil + }).SwitchoverAndWait(context.Background(), "next") + + assert.Assert(t, !success, "expected failure message to become false") + + success, _ = Executor(func( + _ context.Context, _ io.Reader, stdout, _ io.Writer, _ ...string, + ) error { + _, _ = stdout.Write([]byte(`Successfully switched over to something`)) + return nil + }).SwitchoverAndWait(context.Background(), "next") + + assert.Assert(t, success, "expected success message to become true") + }) +} + +func TestExecutorFailoverAndWait(t *testing.T) { + t.Run("Arguments", func(t *testing.T) { + called := false + exec := func( + _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + called = true + assert.DeepEqual(t, command, strings.Fields( + `patronictl failover --force --candidate=new`, + )) + assert.Assert(t, stdin == nil, "expected no stdin, got %T", stdin) + assert.Assert(t, stderr != nil, "should capture stderr") + assert.Assert(t, stdout != nil, "should capture stdout") + return nil + } + + _, _ = Executor(exec).FailoverAndWait(context.Background(), "new") + assert.Assert(t, called) + }) + + t.Run("Error", func(t *testing.T) { + expected := errors.New("bang") + _, actual := Executor(func( + context.Context, io.Reader, io.Writer, io.Writer, ...string, + ) error { + return expected + }).FailoverAndWait(context.Background(), "next") + + assert.Equal(t, expected, actual) + }) + + t.Run("Result", func(t *testing.T) { + success, _ := Executor(func( + _ context.Context, _ io.Reader, stdout, _ io.Writer, _ ...string, + ) error { + _, _ = stdout.Write([]byte(`no luck`)) + return nil + }).FailoverAndWait(context.Background(), "next") + + assert.Assert(t, !success, "expected failure message to become false") + + success, _ = Executor(func( + _ context.Context, _ io.Reader, stdout, _ io.Writer, _ ...string, + ) error { + _, _ = stdout.Write([]byte(`Successfully failed over to something`)) + return nil + }).FailoverAndWait(context.Background(), "next") + + assert.Assert(t, success, "expected success message to become true") + }) +} + func TestExecutorReplaceConfiguration(t *testing.T) { expected := errors.New("bang") exec := func( @@ -108,7 +201,89 @@ func TestExecutorReplaceConfiguration(t *testing.T) { } 
actual := Executor(exec).ReplaceConfiguration( - context.Background(), map[string]interface{}{"some": "values"}) + context.Background(), map[string]any{"some": "values"}) + + assert.Equal(t, expected, actual, "should call exec") +} + +func TestExecutorRestartPendingMembers(t *testing.T) { + expected := errors.New("oop") + exec := func( + _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + assert.DeepEqual(t, command, strings.Fields( + `patronictl restart --pending --force --role=sock-role shoe-scope`, + )) + assert.Assert(t, stdin == nil, "expected no stdin, got %T", stdin) + assert.Assert(t, stderr != nil, "should capture stderr") + assert.Assert(t, stdout != nil, "should capture stdout") + return expected + } + + actual := Executor(exec).RestartPendingMembers( + context.Background(), "sock-role", "shoe-scope") assert.Equal(t, expected, actual, "should call exec") } + +func TestExecutorGetTimeline(t *testing.T) { + t.Run("Error", func(t *testing.T) { + expected := errors.New("bang") + tl, actual := Executor(func( + context.Context, io.Reader, io.Writer, io.Writer, ...string, + ) error { + return expected + }).GetTimeline(context.Background()) + + assert.Equal(t, expected, actual) + assert.Equal(t, tl, int64(0)) + }) + + t.Run("Stderr", func(t *testing.T) { + tl, actual := Executor(func( + _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + stderr.Write([]byte(`no luck`)) + return nil + }).GetTimeline(context.Background()) + + assert.Error(t, actual, "no luck") + assert.Equal(t, tl, int64(0)) + }) + + t.Run("BadJSON", func(t *testing.T) { + tl, actual := Executor(func( + _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + stdout.Write([]byte(`no luck`)) + return nil + }).GetTimeline(context.Background()) + + assert.Error(t, actual, "invalid character 'o' in literal null (expecting 'u')") + assert.Equal(t, tl, int64(0)) + }) + + t.Run("NoLeader", func(t *testing.T) { + tl, actual := Executor(func( + _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + stdout.Write([]byte(`[{"Cluster": "hippo-ha", "Member": "hippo-instance1-ltcf-0", "Host": "hippo-instance1-ltcf-0.hippo-pods", "Role": "Replica", "State": "running", "TL": 4, "Lag in MB": 0}]`)) + return nil + }).GetTimeline(context.Background()) + + assert.NilError(t, actual) + assert.Equal(t, tl, int64(0)) + }) + + t.Run("Success", func(t *testing.T) { + tl, actual := Executor(func( + _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + stdout.Write([]byte(`[{"Cluster": "hippo-ha", "Member": "hippo-instance1-67mc-0", "Host": "hippo-instance1-67mc-0.hippo-pods", "Role": "Leader", "State": "running", "TL": 4}, {"Cluster": "hippo-ha", "Member": "hippo-instance1-ltcf-0", "Host": "hippo-instance1-ltcf-0.hippo-pods", "Role": "Replica", "State": "running", "TL": 4, "Lag in MB": 0}]`)) + return nil + }).GetTimeline(context.Background()) + + assert.NilError(t, actual) + assert.Equal(t, tl, int64(4)) + }) +} diff --git a/internal/patroni/assertions_test.go b/internal/patroni/assertions_test.go deleted file mode 100644 index 00abb65073..0000000000 --- a/internal/patroni/assertions_test.go +++ /dev/null @@ -1,48 +0,0 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package patroni - -import ( - "strings" - - "gotest.tools/v3/assert/cmp" - "sigs.k8s.io/yaml" -) - -func marshalContains(actual interface{}, expected string) cmp.Comparison { - b, err := yaml.Marshal(actual) - return func() cmp.Result { - if err != nil { - return cmp.ResultFromError(err) - } - - if !strings.Contains(string(b), expected) { - return cmp.DeepEqual(string(b), expected)() - } - - return cmp.ResultSuccess - } -} - -func marshalEquals(actual interface{}, expected string) cmp.Comparison { - b, err := yaml.Marshal(actual) - return func() cmp.Result { - if err != nil { - return cmp.ResultFromError(err) - } - return cmp.DeepEqual(string(b), expected)() - } -} diff --git a/internal/patroni/certificates.go b/internal/patroni/certificates.go index 0445c29281..9aa1525769 100644 --- a/internal/patroni/certificates.go +++ b/internal/patroni/certificates.go @@ -1,24 +1,13 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni import ( - v1 "k8s.io/api/core/v1" + "encoding" - "github.com/crunchydata/postgres-operator/internal/pki" + corev1 "k8s.io/api/core/v1" ) const ( @@ -29,12 +18,12 @@ const ( certServerFileKey = "patroni.crt-combined" ) -// certAuthorities encodes roots in a format suitable for Patroni's TLS verification. -func certAuthorities(roots ...*pki.Certificate) ([]byte, error) { +// certFile concatenates the results of multiple PEM-encoding marshalers. +func certFile(texts ...encoding.TextMarshaler) ([]byte, error) { var out []byte - for i := range roots { - if b, err := roots[i].MarshalText(); err == nil { + for i := range texts { + if b, err := texts[i].MarshalText(); err == nil { out = append(out, b...) } else { return nil, err @@ -44,35 +33,15 @@ func certAuthorities(roots ...*pki.Certificate) ([]byte, error) { return out, nil } -// certFile encodes cert and key as a combination suitable for -// Patroni's TLS identification. It can be used by both the client and the server. -func certFile(key *pki.PrivateKey, cert *pki.Certificate) ([]byte, error) { - var out []byte - - if b, err := key.MarshalText(); err == nil { - out = append(out, b...) - } else { - return nil, err - } - - if b, err := cert.MarshalText(); err == nil { - out = append(out, b...) - } else { - return nil, err - } - - return out, nil -} - // instanceCertificates returns projections of Patroni's CAs, keys, and // certificates to include in the instance configuration volume. 
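The reworked `certFile` no longer depends on concrete pki types: anything implementing `encoding.TextMarshaler` can contribute PEM bytes, and the first marshal error aborts the whole concatenation. A self-contained sketch of that behavior with stand-in types; the real callers pass a key and its certificate, key first, then certificate:

```go
package main

import (
	"encoding"
	"fmt"
)

// pemBlock is a stand-in for the pki types; certFile only needs MarshalText.
type pemBlock string

func (p pemBlock) MarshalText() ([]byte, error) { return []byte(p), nil }

// concatenate mirrors the new certFile helper: marshal each input and append
// the results in order, stopping on the first error.
func concatenate(texts ...encoding.TextMarshaler) ([]byte, error) {
	var out []byte
	for _, t := range texts {
		b, err := t.MarshalText()
		if err != nil {
			return nil, err
		}
		out = append(out, b...)
	}
	return out, nil
}

func main() {
	key := pemBlock("-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n")
	crt := pemBlock("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n")

	combined, _ := concatenate(key, crt)
	fmt.Print(string(combined)) // key first, then certificate
}
```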
-func instanceCertificates(certificates *v1.Secret) []v1.VolumeProjection { - return []v1.VolumeProjection{{ - Secret: &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{ +func instanceCertificates(certificates *corev1.Secret) []corev1.VolumeProjection { + return []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ Name: certificates.Name, }, - Items: []v1.KeyToPath{ + Items: []corev1.KeyToPath{ { Key: certAuthorityFileKey, Path: certAuthorityConfigPath, diff --git a/internal/patroni/certificates.md b/internal/patroni/certificates.md index d1a4aee0f4..f58786ce20 100644 --- a/internal/patroni/certificates.md +++ b/internal/patroni/certificates.md @@ -1,16 +1,7 @@ Server diff --git a/internal/patroni/certificates_test.go b/internal/patroni/certificates_test.go index dfd41d420f..3073f2247f 100644 --- a/internal/patroni/certificates_test.go +++ b/internal/patroni/certificates_test.go @@ -1,87 +1,44 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni import ( - "strings" + "errors" "testing" "gotest.tools/v3/assert" - "gotest.tools/v3/assert/cmp" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" - "github.com/crunchydata/postgres-operator/internal/pki" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" ) -const rootPEM = `-----BEGIN CERTIFICATE----- -MIIBgTCCASigAwIBAgIRAO0NXdQ5ZtvI26doDvj9Dx8wCgYIKoZIzj0EAwMwHzEd -MBsGA1UEAxMUcG9zdGdyZXMtb3BlcmF0b3ItY2EwHhcNMjEwMTI3MjEyNTU0WhcN -MzEwMTI1MjIyNTU0WjAfMR0wGwYDVQQDExRwb3N0Z3Jlcy1vcGVyYXRvci1jYTBZ -MBMGByqGSM49AgEGCCqGSM49AwEHA0IABL0xD8B6ZQHPscklofw2hpEN1F8h06Ys -IRhK2xoy8ASkiKOkzXVs22R/Wnv/+jAMVf9rit0vhblZlvn2yP7e29WjRTBDMA4G -A1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBQjfqdS -Ynr3rFHMLd3fHO79tH3w5DAKBggqhkjOPQQDAwNHADBEAiA41LbQXeC0G/AyOHgs -gaUp3fzHKSsrTGhzA8+dK2mnSgIgEKnv1FquJBJuXRBAxzrmnt0nJPiTWB926iNE -BY8V4Ag= ------END CERTIFICATE-----` - -func TestCertAuthorities(t *testing.T) { - root, err := pki.ParseCertificate([]byte(rootPEM)) - assert.NilError(t, err) - - data, err := certAuthorities(root) - assert.NilError(t, err) +type funcMarshaler func() ([]byte, error) - // PEM-encoded certificates. 
- assert.DeepEqual(t, string(data), rootPEM+"\n") -} +func (f funcMarshaler) MarshalText() ([]byte, error) { return f() } func TestCertFile(t *testing.T) { - root := pki.NewRootCertificateAuthority() - assert.NilError(t, root.Generate()) - - instance := pki.NewLeafCertificate("instance.pod-dns", nil, nil) - assert.NilError(t, instance.Generate(root)) + expected := errors.New("boom") + var short funcMarshaler = func() ([]byte, error) { return []byte(`one`), nil } + var fail funcMarshaler = func() ([]byte, error) { return nil, expected } - data, err := certFile(instance.PrivateKey, instance.Certificate) + text, err := certFile(short, short, short) assert.NilError(t, err) + assert.DeepEqual(t, text, []byte(`oneoneone`)) - // PEM-encoded key followed by the certificate - // - https://docs.python.org/3/library/ssl.html#combined-key-and-certificate - // - https://docs.python.org/3/library/ssl.html#certificate-chains - assert.Assert(t, - cmp.Regexp(`^`+ - `-----BEGIN [^ ]+ PRIVATE KEY-----\n`+ - `([^-]+\n)+`+ - `-----END [^ ]+ PRIVATE KEY-----\n`+ - `-----BEGIN CERTIFICATE-----\n`+ - `([^-]+\n)+`+ - `-----END CERTIFICATE-----\n`+ - `$`, - string(data), - )) + text, err = certFile(short, fail, short) + assert.Equal(t, err, expected) + assert.DeepEqual(t, text, []byte(nil)) } func TestInstanceCertificates(t *testing.T) { - certs := new(v1.Secret) + certs := new(corev1.Secret) certs.Name = "some-name" projections := instanceCertificates(certs) - assert.Assert(t, marshalEquals(projections, strings.TrimSpace(` + assert.Assert(t, cmp.MarshalMatches(projections, ` - secret: items: - key: patroni.ca-roots @@ -89,5 +46,5 @@ func TestInstanceCertificates(t *testing.T) { - key: patroni.crt-combined path: ~postgres-operator/patroni.crt+key name: some-name - `)+"\n")) + `)) } diff --git a/internal/patroni/config.go b/internal/patroni/config.go index 0df7087ed0..b4d7e54f68 100644 --- a/internal/patroni/config.go +++ b/internal/patroni/config.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -20,9 +9,10 @@ import ( "path" "strings" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -34,6 +24,7 @@ const ( ) const ( + basebackupCreateReplicaMethod = "basebackup" pgBackRestCreateReplicaMethod = "pgbackrest" ) @@ -54,7 +45,7 @@ func clusterYAML( cluster *v1beta1.PostgresCluster, pgHBAs postgres.HBAs, pgParameters postgres.Parameters, ) (string, error) { - root := map[string]interface{}{ + root := map[string]any{ // The cluster identifier. This value cannot change during the cluster's // lifetime. 
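The tests above drop the package-local `marshalEquals`/`marshalContains` helpers in favor of an internal `cmp.MarshalMatches` comparison whose implementation is not part of this diff. For reference, a minimal helper in the spirit of the removed one can be built from `gotest.tools` and `sigs.k8s.io/yaml`; this is only an approximation and the real `internal/testing/cmp` helper may normalize whitespace differently.

```go
package example

import (
	"strings"

	"gotest.tools/v3/assert/cmp"
	"sigs.k8s.io/yaml"
)

// marshalMatches marshals actual to YAML and compares it to expected,
// trimming surrounding whitespace so multi-line literals can be indented
// naturally inside test functions.
func marshalMatches(actual interface{}, expected string) cmp.Comparison {
	b, err := yaml.Marshal(actual)
	return func() cmp.Result {
		if err != nil {
			return cmp.ResultFromError(err)
		}
		return cmp.DeepEqual(string(b), strings.TrimSpace(expected)+"\n")()
	}
}
```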
"scope": naming.PatroniScope(cluster), @@ -64,7 +55,7 @@ func clusterYAML( // // NOTE(cbandy): It *might* be possible to *carefully* change the role and // scope labels, but there is no way to reconfigure all instances at once. - "kubernetes": map[string]interface{}{ + "kubernetes": map[string]any{ "namespace": cluster.Namespace, "role_label": naming.LabelRole, "scope_label": naming.LabelPatroni, @@ -78,7 +69,7 @@ func clusterYAML( }, }, - "postgresql": map[string]interface{}{ + "postgresql": map[string]any{ // TODO(cbandy): "callbacks" // Custom configuration "must exist on all cluster nodes". @@ -94,15 +85,15 @@ func clusterYAML( // PostgreSQL Auth settings used by Patroni to // create replication, and pg_rewind accounts // TODO(tjmoore4): add "superuser" account - "authentication": map[string]interface{}{ - "replication": map[string]interface{}{ + "authentication": map[string]any{ + "replication": map[string]any{ "sslcert": "/tmp/replication/tls.crt", "sslkey": "/tmp/replication/tls.key", "sslmode": "verify-ca", "sslrootcert": "/tmp/replication/ca.crt", "username": postgres.ReplicationUser, }, - "rewind": map[string]interface{}{ + "rewind": map[string]any{ "sslcert": "/tmp/replication/tls.crt", "sslkey": "/tmp/replication/tls.key", "sslmode": "verify-ca", @@ -116,7 +107,7 @@ func clusterYAML( // instance. TLS and/or authentication settings need to be applied consistently // across the entire cluster. - "restapi": map[string]interface{}{ + "restapi": map[string]any{ // Use TLS to encrypt traffic and verify clients. // NOTE(cbandy): The path package always uses slash separators. "cafile": path.Join(configDirectory, certAuthorityConfigPath), @@ -141,7 +132,7 @@ func clusterYAML( // - https://github.com/zalando/patroni/commit/ba4ab58d4069ee30 }, - "ctl": map[string]interface{}{ + "ctl": map[string]any{ // Use TLS to verify the server and present a client certificate. // NOTE(cbandy): The path package always uses slash separators. "cacert": path.Join(configDirectory, certAuthorityConfigPath), @@ -154,7 +145,7 @@ func clusterYAML( "insecure": false, }, - "watchdog": map[string]interface{}{ + "watchdog": map[string]any{ // Disable leader watchdog device. Kubernetes' liveness probe is a less // flexible approximation. "mode": "off", @@ -165,16 +156,12 @@ func clusterYAML( // Patroni has not yet bootstrapped. Populate the "bootstrap.dcs" field to // facilitate it. When Patroni is already bootstrapped, this field is ignored. - // Deserialize the schemaless field. There will be no error because the - // Kubernetes API has already ensured it is a JSON object. - configuration := make(map[string]interface{}) + var configuration map[string]any if cluster.Spec.Patroni != nil { - _ = yaml.Unmarshal( - cluster.Spec.Patroni.DynamicConfiguration.Raw, &configuration, - ) + configuration = cluster.Spec.Patroni.DynamicConfiguration } - root["bootstrap"] = map[string]interface{}{ + root["bootstrap"] = map[string]any{ "dcs": DynamicConfiguration(cluster, configuration, pgHBAs, pgParameters), // Missing here is "users" which runs *after* "post_bootstrap". It is @@ -191,11 +178,11 @@ func clusterYAML( // and returns a value that can be marshaled to JSON. func DynamicConfiguration( cluster *v1beta1.PostgresCluster, - configuration map[string]interface{}, + configuration map[string]any, pgHBAs postgres.HBAs, pgParameters postgres.Parameters, -) map[string]interface{} { +) map[string]any { // Copy the entire configuration before making any changes. 
- root := make(map[string]interface{}, len(configuration)) + root := make(map[string]any, len(configuration)) for k, v := range configuration { root[k] = v } @@ -204,11 +191,20 @@ func DynamicConfiguration( root["loop_wait"] = *cluster.Spec.Patroni.SyncPeriodSeconds // Copy the "postgresql" section before making any changes. - postgresql := map[string]interface{}{ + postgresql := map[string]any{ // TODO(cbandy): explain this. requires an archive, perhaps. "use_slots": false, } - if section, ok := root["postgresql"].(map[string]interface{}); ok { + + // When TDE is configured, override the pg_rewind binary name to point + // to the wrapper script. + if config.FetchKeyCommand(&cluster.Spec) != "" { + postgresql["bin_name"] = map[string]any{ + "pg_rewind": "/tmp/pg_rewind_tde.sh", + } + } + + if section, ok := root["postgresql"].(map[string]any); ok { for k, v := range section { postgresql[k] = v } @@ -216,13 +212,13 @@ func DynamicConfiguration( root["postgresql"] = postgresql // Copy the "postgresql.parameters" section over any defaults. - parameters := make(map[string]interface{}) + parameters := make(map[string]any) if pgParameters.Default != nil { for k, v := range pgParameters.Default.AsMap() { parameters[k] = v } } - if section, ok := postgresql["parameters"].(map[string]interface{}); ok { + if section, ok := postgresql["parameters"].(map[string]any); ok { for k, v := range section { parameters[k] = v } @@ -230,27 +226,34 @@ func DynamicConfiguration( // Override the above with mandatory parameters. if pgParameters.Mandatory != nil { for k, v := range pgParameters.Mandatory.AsMap() { - // Unlike other PostgreSQL parameters that have mandatory values, - // shared_preload_libraries is a comma separated list that can have - // other values appended in addition to the mandatory values. Below, - // any values provided in the CRD are appended after the mandatory - // values. - s, ok := parameters[k].(string) - if k == "shared_preload_libraries" && ok { - parameters[k] = v + "," + s - } else { - parameters[k] = v + + // This parameter is a comma-separated list. Rather than overwrite the + // user-defined value, we want to combine it with the mandatory one. + // Some libraries belong at specific positions in the list, so figure + // that out as well. + if k == "shared_preload_libraries" { + // Load mandatory libraries ahead of user-defined libraries. + if s, ok := parameters[k].(string); ok && len(s) > 0 { + v = v + "," + s + } + // Load "citus" ahead of any other libraries. + // - https://github.com/citusdata/citus/blob/v12.0.0/src/backend/distributed/shared_library_init.c#L417-L419 + if strings.Contains(v, "citus") { + v = "citus," + v + } } + + parameters[k] = v } } postgresql["parameters"] = parameters // Copy the "postgresql.pg_hba" section after any mandatory values. - hba := make([]string, len(pgHBAs.Mandatory)) + hba := make([]string, 0, len(pgHBAs.Mandatory)) for i := range pgHBAs.Mandatory { - hba[i] = pgHBAs.Mandatory[i].String() + hba = append(hba, pgHBAs.Mandatory[i].String()) } - if section, ok := postgresql["pg_hba"].([]interface{}); ok { + if section, ok := postgresql["pg_hba"].([]any); ok { for i := range section { // any pg_hba values that are not strings will be skipped if value, ok := section[i].(string); ok { @@ -266,28 +269,50 @@ func DynamicConfiguration( } postgresql["pg_hba"] = hba - // TODO(cbandy): explain this. 
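The new `shared_preload_libraries` handling above appends user-defined libraries after the mandatory ones and then promotes `citus` to the front whenever it appears anywhere in the combined list; the "shared_preload_libraries order" test case later in this diff expects `citus,mandatory,given, citus, more`. A standalone sketch of just that merge rule:

```go
package example

import "strings"

// mergeSharedPreloadLibraries reproduces the merge rule from the patch:
// mandatory libraries are listed before user-defined ones, and "citus" is
// loaded first whenever it appears anywhere in the resulting list.
func mergeSharedPreloadLibraries(mandatory, userDefined string) string {
	v := mandatory
	if len(userDefined) > 0 {
		v = v + "," + userDefined
	}
	if strings.Contains(v, "citus") {
		v = "citus," + v
	}
	return v
}

// mergeSharedPreloadLibraries("mandatory", "given, citus, more")
// returns "citus,mandatory,given, citus, more", matching the test expectation.
```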
- postgresql["use_pg_rewind"] = true + // Enabling `pg_rewind` allows a former primary to automatically rejoin the + // cluster even if it has commits that were not sent to a replica. In other + // words, this favors availability over consistency. Without it, the former + // primary needs patronictl reinit to rejoin. + // + // Recent versions of `pg_rewind` can run with limited permissions granted + // by Patroni to the user defined in "postgresql.authentication.rewind". + // PostgreSQL v10 and earlier require superuser access over the network. + postgresql["use_pg_rewind"] = cluster.Spec.PostgresVersion > 10 if cluster.Spec.Standby != nil && cluster.Spec.Standby.Enabled { // Copy the "standby_cluster" section before making any changes. - standby := make(map[string]interface{}) - if section, ok := root["standby_cluster"].(map[string]interface{}); ok { + standby := make(map[string]any) + if section, ok := root["standby_cluster"].(map[string]any); ok { for k, v := range section { standby[k] = v } } - // NOTE(cbandy): pgBackRest is the only supported standby source. + // Unset any previous value for restore_command - we will set it later if needed + delete(standby, "restore_command") - // Do not fallback to other methods when creating the standby leader. - standby["create_replica_methods"] = []string{pgBackRestCreateReplicaMethod} + // Populate replica creation methods based on options provided in the standby spec: + methods := []string{} + if cluster.Spec.Standby.Host != "" { + standby["host"] = cluster.Spec.Standby.Host + if cluster.Spec.Standby.Port != nil { + standby["port"] = *cluster.Spec.Standby.Port + } + + methods = append([]string{basebackupCreateReplicaMethod}, methods...) + } - // Populate the standby leader by shipping logs through pgBackRest. - // This also overrides the "restore_command" used by standby replicas. - // - https://www.postgresql.org/docs/current/warm-standby.html - standby["restore_command"] = pgParameters.Mandatory.Value("restore_command") + if cluster.Spec.Standby.RepoName != "" { + // Append pgbackrest as the first choice when creating the standby + methods = append([]string{pgBackRestCreateReplicaMethod}, methods...) + // Populate the standby leader by shipping logs through pgBackRest. + // This also overrides the "restore_command" used by standby replicas. + // - https://www.postgresql.org/docs/current/warm-standby.html + standby["restore_command"] = pgParameters.Mandatory.Value("restore_command") + } + + standby["create_replica_methods"] = methods root["standby_cluster"] = standby } @@ -298,10 +323,10 @@ func DynamicConfiguration( // instance container. func instanceEnvironment( cluster *v1beta1.PostgresCluster, - clusterPodService *v1.Service, - leaderService *v1.Service, - podContainers []v1.Container, -) []v1.EnvVar { + clusterPodService *corev1.Service, + leaderService *corev1.Service, + podContainers []corev1.Container, +) []corev1.EnvVar { var ( patroniPort = *cluster.Spec.Patroni.Port postgresPort = *cluster.Spec.Port @@ -310,12 +335,12 @@ func instanceEnvironment( // Gather Endpoint ports for any Container ports that match the leader // Service definition. 
- ports := []v1.EndpointPort{} + ports := []corev1.EndpointPort{} for _, sp := range leaderService.Spec.Ports { for i := range podContainers { for _, cp := range podContainers[i].Ports { if sp.TargetPort.StrVal == cp.Name { - ports = append(ports, v1.EndpointPort{ + ports = append(ports, corev1.EndpointPort{ Name: sp.Name, Port: cp.ContainerPort, Protocol: cp.Protocol, @@ -331,12 +356,12 @@ func instanceEnvironment( // - https://github.com/zalando/patroni/blob/v2.0.2/patroni/config.py#L247 // - https://github.com/zalando/patroni/blob/v2.0.2/patroni/postgresql/postmaster.py#L215-L216 - variables := []v1.EnvVar{ + variables := []corev1.EnvVar{ // Set "name" to the v1.Pod's name. Required when using Kubernetes for DCS. // Patroni must be restarted when changing this value. { Name: "PATRONI_NAME", - ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{ + ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{ APIVersion: "v1", FieldPath: "metadata.name", }}, @@ -346,7 +371,7 @@ func instanceEnvironment( // Patroni must be restarted when changing this value. { Name: "PATRONI_KUBERNETES_POD_IP", - ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{ + ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{ APIVersion: "v1", FieldPath: "status.podIP", }}, @@ -421,25 +446,25 @@ func instanceEnvironment( // instanceConfigFiles returns projections of Patroni's configuration files // to include in the instance configuration volume. -func instanceConfigFiles(cluster, instance *v1.ConfigMap) []v1.VolumeProjection { - return []v1.VolumeProjection{ +func instanceConfigFiles(cluster, instance *corev1.ConfigMap) []corev1.VolumeProjection { + return []corev1.VolumeProjection{ { - ConfigMap: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ Name: cluster.Name, }, - Items: []v1.KeyToPath{{ + Items: []corev1.KeyToPath{{ Key: configMapFileKey, Path: "~postgres-operator_cluster.yaml", }}, }, }, { - ConfigMap: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ Name: instance.Name, }, - Items: []v1.KeyToPath{{ + Items: []corev1.KeyToPath{{ Key: configMapFileKey, Path: "~postgres-operator_instance.yaml", }}, @@ -453,12 +478,12 @@ func instanceYAML( cluster *v1beta1.PostgresCluster, instance *v1beta1.PostgresInstanceSetSpec, pgbackrestReplicaCreateCommand []string, ) (string, error) { - root := map[string]interface{}{ + root := map[string]any{ // Missing here is "name" which cannot be known until the instance Pod is // created. That value should be injected using the downward API and the // PATRONI_NAME environment variable. - "kubernetes": map[string]interface{}{ + "kubernetes": map[string]any{ // Missing here is "pod_ip" which cannot be known until the instance Pod is // created. That value should be injected using the downward API and the // PATRONI_KUBERNETES_POD_IP environment variable. @@ -467,7 +492,7 @@ func instanceYAML( // See the PATRONI_KUBERNETES_PORTS env variable. }, - "restapi": map[string]interface{}{ + "restapi": map[string]any{ // Missing here is "connect_address" which cannot be known until the // instance Pod is created. That value should be injected using the downward // API and the PATRONI_RESTAPI_CONNECT_ADDRESS environment variable. 
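As the hunk above shows, `instanceEnvironment` feeds Patroni its pod identity through the downward API (`PATRONI_NAME` from `metadata.name`, `PATRONI_KUBERNETES_POD_IP` from `status.podIP`) and matches leader Service target ports against container port names to build endpoint ports. A minimal, self-contained sketch of the downward-API wiring using the upstream `corev1` types:

```go
package example

import corev1 "k8s.io/api/core/v1"

// patroniIdentityEnv builds the two downward-API variables Patroni needs to
// identify its own Pod when Kubernetes is used as the DCS.
func patroniIdentityEnv() []corev1.EnvVar {
	return []corev1.EnvVar{
		{
			Name: "PATRONI_NAME",
			ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{
				APIVersion: "v1", FieldPath: "metadata.name",
			}},
		},
		{
			Name: "PATRONI_KUBERNETES_POD_IP",
			ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{
				APIVersion: "v1", FieldPath: "status.podIP",
			}},
		},
	}
}
```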
@@ -476,13 +501,13 @@ func instanceYAML( // See the PATRONI_RESTAPI_LISTEN environment variable. }, - "tags": map[string]interface{}{ + "tags": map[string]any{ // TODO(cbandy): "nofailover" // TODO(cbandy): "nosync" }, } - postgresql := map[string]interface{}{ + postgresql := map[string]any{ // TODO(cbandy): "bin_dir" // Missing here is "connect_address" which cannot be known until the @@ -537,7 +562,7 @@ func instanceYAML( for i := range command { quoted[i] = quoteShellWord(command[i]) } - postgresql[pgBackRestCreateReplicaMethod] = map[string]interface{}{ + postgresql[pgBackRestCreateReplicaMethod] = map[string]any{ "command": strings.Join(quoted, " "), "keep_data": true, "no_master": true, @@ -551,47 +576,61 @@ func instanceYAML( postgresql["create_replica_methods"] = methods if !ClusterBootstrapped(cluster) { - // if restore status exists, then a restore occurred an the "existing" method is used - if cluster.Status.PGBackRest != nil && cluster.Status.PGBackRest.Restore != nil { + isRestore := (cluster.Status.PGBackRest != nil && cluster.Status.PGBackRest.Restore != nil) + isDataSource := (cluster.Spec.DataSource != nil && cluster.Spec.DataSource.Volumes != nil && + cluster.Spec.DataSource.Volumes.PGDataVolume != nil && + cluster.Spec.DataSource.Volumes.PGDataVolume.Directory != "") + // If the cluster is being bootstrapped using existing volumes, or if the cluster is being + // bootstrapped following a restore, then use the "existing" + // bootstrap method. Otherwise use "initdb". + if isRestore || isDataSource { data_dir := postgres.DataDirectory(cluster) - root["bootstrap"] = map[string]interface{}{ + root["bootstrap"] = map[string]any{ "method": "existing", - "existing": map[string]interface{}{ + "existing": map[string]any{ "command": fmt.Sprintf(`mv %q %q`, data_dir+"_bootstrap", data_dir), "no_params": "true", }, } } else { + + initdb := []string{ + // Enable checksums on data pages to help detect corruption of + // storage that would otherwise be silent. This also enables + // "wal_log_hints" which is a prerequisite for using `pg_rewind`. + // - https://www.postgresql.org/docs/current/app-initdb.html + // - https://www.postgresql.org/docs/current/app-pgrewind.html + // - https://www.postgresql.org/docs/current/runtime-config-wal.html + // + // The benefits of checksums in the Kubernetes storage landscape + // outweigh their negligible overhead, and enabling them later + // is costly. (Every file of the cluster must be rewritten.) + // PostgreSQL v12 introduced the `pg_checksums` utility which + // can cheaply disable them while PostgreSQL is stopped. + // - https://www.postgresql.org/docs/current/app-pgchecksums.html + "data-checksums", + "encoding=UTF8", + + // NOTE(cbandy): The "--waldir" option was introduced in PostgreSQL v10. + "waldir=" + postgres.WALDirectory(cluster, instance), + } + + // Append the encryption key command, if provided. + if ekc := config.FetchKeyCommand(&cluster.Spec); ekc != "" { + initdb = append(initdb, fmt.Sprintf("encryption-key-command=%s", ekc)) + } + // Populate some "bootstrap" fields to initialize the cluster. // When Patroni is already bootstrapped, this section is ignored. // - https://github.com/zalando/patroni/blob/v2.0.2/docs/SETTINGS.rst#bootstrap-configuration // - https://github.com/zalando/patroni/blob/v2.0.2/docs/replica_bootstrap.rst#bootstrap - root["bootstrap"] = map[string]interface{}{ + root["bootstrap"] = map[string]any{ "method": "initdb", // The "initdb" bootstrap method is configured differently from others. 
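The `initdb` bootstrap options are now assembled as a slice so that, when a TDE key command is configured (via `config.FetchKeyCommand`, whose implementation is not shown in this diff), an extra `encryption-key-command` entry can be appended; Patroni prepends `--` to each entry before invoking `initdb`. A reduced sketch with the key command passed in as a plain string:

```go
package example

import "fmt"

// initdbOptions mirrors the option list from the patch: data checksums and
// UTF8 encoding are always requested, the WAL directory is set explicitly,
// and an encryption-key-command entry is appended only when TDE is in use.
func initdbOptions(walDirectory, encryptionKeyCommand string) []string {
	initdb := []string{
		"data-checksums",
		"encoding=UTF8",
		"waldir=" + walDirectory,
	}
	if encryptionKeyCommand != "" {
		initdb = append(initdb, fmt.Sprintf("encryption-key-command=%s", encryptionKeyCommand))
	}
	return initdb
}

// initdbOptions("/pgdata/pg12_wal", "echo test") produces the four entries
// that appear in the expected YAML of the TDE test case later in this diff.
```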
// Patroni prepends "--" before it calls `initdb`. // - https://github.com/zalando/patroni/blob/v2.0.2/patroni/postgresql/bootstrap.py#L45 - "initdb": []string{ - // Enable checksums on data pages to help detect corruption of - // storage that would otherwise be silent. This also enables - // "wal_log_hints" which is a prerequisite for using `pg_rewind`. - // - https://www.postgresql.org/docs/current/app-initdb.html - // - https://www.postgresql.org/docs/current/app-pgrewind.html - // - https://www.postgresql.org/docs/current/runtime-config-wal.html - // - // The benefits of checksums in the Kubernetes storage landscape - // outweigh their negligible overhead, and enabling them later - // is costly. (Every file of the cluster must be rewritten.) - // PostgreSQL v12 introduced the `pg_checksums` utility which - // can cheaply disable them while PostgreSQL is stopped. - // - https://www.postgresql.org/docs/current/app-pgchecksums.html - "data-checksums", - "encoding=UTF8", - - // NOTE(cbandy): The "--waldir" option was introduced in PostgreSQL v10. - "waldir=" + postgres.WALDirectory(cluster, instance), - }, + "initdb": initdb, } } } @@ -601,7 +640,7 @@ func instanceYAML( } // probeTiming returns a Probe with thresholds and timeouts set according to spec. -func probeTiming(spec *v1beta1.PatroniSpec) *v1.Probe { +func probeTiming(spec *v1beta1.PatroniSpec) *corev1.Probe { // "Probes should be configured in such a way that they start failing about // time when the leader key is expiring." // - https://github.com/zalando/patroni/blob/v2.0.1/docs/rest_api.rst @@ -609,7 +648,7 @@ func probeTiming(spec *v1beta1.PatroniSpec) *v1.Probe { // TODO(cbandy): When the probe times out, failure triggers at // (FailureThreshold × PeriodSeconds + TimeoutSeconds) - probe := v1.Probe{ + probe := corev1.Probe{ TimeoutSeconds: *spec.SyncPeriodSeconds / 2, PeriodSeconds: *spec.SyncPeriodSeconds, SuccessThreshold: 1, diff --git a/internal/patroni/config.md b/internal/patroni/config.md index 1cd3b362dc..18d28d8a4e 100644 --- a/internal/patroni/config.md +++ b/internal/patroni/config.md @@ -1,16 +1,7 @@ Patroni configuration is complicated. The daemon `patroni` and the client diff --git a/internal/patroni/config_test.go b/internal/patroni/config_test.go index 7166d5299c..a45568df8b 100644 --- a/internal/patroni/config_test.go +++ b/internal/patroni/config_test.go @@ -1,47 +1,98 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package patroni import ( - "io/ioutil" + "os" "os/exec" "path/filepath" "strings" "testing" "gotest.tools/v3/assert" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestClusterYAML(t *testing.T) { t.Parallel() - cluster := new(v1beta1.PostgresCluster) - cluster.Default() - cluster.Namespace = "some-namespace" - cluster.Name = "cluster-name" + t.Run("PG version defaulted", func(t *testing.T) { + cluster := new(v1beta1.PostgresCluster) + cluster.Default() + cluster.Namespace = "some-namespace" + cluster.Name = "cluster-name" - data, err := clusterYAML(cluster, postgres.HBAs{}, postgres.Parameters{}) - assert.NilError(t, err) - assert.Equal(t, data, strings.TrimSpace(` + data, err := clusterYAML(cluster, postgres.HBAs{}, postgres.Parameters{}) + assert.NilError(t, err) + assert.Equal(t, data, strings.TrimSpace(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. +bootstrap: + dcs: + loop_wait: 10 + postgresql: + parameters: {} + pg_hba: [] + use_pg_rewind: false + use_slots: false + ttl: 30 +ctl: + cacert: /etc/patroni/~postgres-operator/patroni.ca-roots + certfile: /etc/patroni/~postgres-operator/patroni.crt+key + insecure: false + keyfile: null +kubernetes: + labels: + postgres-operator.crunchydata.com/cluster: cluster-name + namespace: some-namespace + role_label: postgres-operator.crunchydata.com/role + scope_label: postgres-operator.crunchydata.com/patroni + use_endpoints: true +postgresql: + authentication: + replication: + sslcert: /tmp/replication/tls.crt + sslkey: /tmp/replication/tls.key + sslmode: verify-ca + sslrootcert: /tmp/replication/ca.crt + username: _crunchyrepl + rewind: + sslcert: /tmp/replication/tls.crt + sslkey: /tmp/replication/tls.key + sslmode: verify-ca + sslrootcert: /tmp/replication/ca.crt + username: _crunchyrepl +restapi: + cafile: /etc/patroni/~postgres-operator/patroni.ca-roots + certfile: /etc/patroni/~postgres-operator/patroni.crt+key + keyfile: null + verify_client: optional +scope: cluster-name-ha +watchdog: + mode: "off" + `)+"\n") + }) + + t.Run(">PG10", func(t *testing.T) { + cluster := new(v1beta1.PostgresCluster) + cluster.Default() + cluster.Namespace = "some-namespace" + cluster.Name = "cluster-name" + cluster.Spec.PostgresVersion = 14 + + data, err := clusterYAML(cluster, postgres.HBAs{}, postgres.Parameters{}) + assert.NilError(t, err) + assert.Equal(t, data, strings.TrimSpace(` # Generated by postgres-operator. DO NOT EDIT. # Your changes will not be saved. 
bootstrap: @@ -88,12 +139,12 @@ scope: cluster-name-ha watchdog: mode: "off" `)+"\n") + }) } func TestDynamicConfiguration(t *testing.T) { t.Parallel() - newInt32 := func(i int32) *int32 { return &i } parameters := func(in map[string]string) *postgres.ParameterSet { out := postgres.NewParameterSet() for k, v := range in { @@ -105,18 +156,18 @@ func TestDynamicConfiguration(t *testing.T) { for _, tt := range []struct { name string cluster *v1beta1.PostgresCluster - input map[string]interface{} + input map[string]any hbas postgres.HBAs params postgres.Parameters - expected map[string]interface{} + expected map[string]any }{ { name: "empty is valid", - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{}, + "postgresql": map[string]any{ + "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, @@ -125,15 +176,15 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "top-level passes through", - input: map[string]interface{}{ + input: map[string]any{ "retry_timeout": 5, }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), "retry_timeout": 5, - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{}, + "postgresql": map[string]any{ + "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, @@ -145,20 +196,20 @@ func TestDynamicConfiguration(t *testing.T) { cluster: &v1beta1.PostgresCluster{ Spec: v1beta1.PostgresClusterSpec{ Patroni: &v1beta1.PatroniSpec{ - LeaderLeaseDurationSeconds: newInt32(99), - SyncPeriodSeconds: newInt32(8), + LeaderLeaseDurationSeconds: initialize.Int32(99), + SyncPeriodSeconds: initialize.Int32(8), }, }, }, - input: map[string]interface{}{ + input: map[string]any{ "loop_wait": 3, "ttl": "nope", }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(8), "ttl": int32(99), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{}, + "postgresql": map[string]any{ + "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, @@ -167,14 +218,14 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "postgresql: wrong-type is ignored", - input: map[string]interface{}{ + input: map[string]any{ "postgresql": true, }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{}, + "postgresql": map[string]any{ + "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, @@ -183,17 +234,17 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "postgresql: defaults and overrides", - input: map[string]interface{}{ - "postgresql": map[string]interface{}{ + input: map[string]any{ + "postgresql": map[string]any{ "use_pg_rewind": "overridden", "use_slots": "input", }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{}, + "postgresql": map[string]any{ + "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": "input", @@ -202,16 +253,16 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "postgresql.parameters: wrong-type is ignored", - input: 
map[string]interface{}{ - "postgresql": map[string]interface{}{ + input: map[string]any{ + "postgresql": map[string]any{ "parameters": true, }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{}, + "postgresql": map[string]any{ + "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, @@ -220,19 +271,19 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "postgresql.parameters: input passes through", - input: map[string]interface{}{ - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{ + input: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ "something": "str", "another": 5, }, }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{ + "postgresql": map[string]any{ + "parameters": map[string]any{ "something": "str", "another": 5, }, @@ -244,9 +295,9 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "postgresql.parameters: input overrides default", - input: map[string]interface{}{ - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{ + input: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ "something": "str", "another": 5, }, @@ -258,11 +309,11 @@ func TestDynamicConfiguration(t *testing.T) { "unrelated": "default", }), }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{ + "postgresql": map[string]any{ + "parameters": map[string]any{ "something": "str", "another": 5, "unrelated": "default", @@ -275,9 +326,9 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "postgresql.parameters: mandatory overrides input", - input: map[string]interface{}{ - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{ + input: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ "something": "str", "another": 5, }, @@ -289,11 +340,11 @@ func TestDynamicConfiguration(t *testing.T) { "unrelated": "setting", }), }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{ + "postgresql": map[string]any{ + "parameters": map[string]any{ "something": "overrides", "another": 5, "unrelated": "setting", @@ -306,9 +357,9 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "postgresql.parameters: mandatory shared_preload_libraries", - input: map[string]interface{}{ - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{ + input: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ "shared_preload_libraries": "given", }, }, @@ -318,11 +369,11 @@ func TestDynamicConfiguration(t *testing.T) { "shared_preload_libraries": "mandatory", }), }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{ + "postgresql": map[string]any{ + "parameters": map[string]any{ "shared_preload_libraries": "mandatory,given", }, "pg_hba": []string{}, @@ -332,10 +383,10 @@ func TestDynamicConfiguration(t 
*testing.T) { }, }, { - name: "postgresql.parameters: mandatory shared_preload_libraries bad type", - input: map[string]interface{}{ - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{ + name: "postgresql.parameters: mandatory shared_preload_libraries wrong-type is ignored", + input: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ "shared_preload_libraries": 1, }, }, @@ -345,11 +396,11 @@ func TestDynamicConfiguration(t *testing.T) { "shared_preload_libraries": "mandatory", }), }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{ + "postgresql": map[string]any{ + "parameters": map[string]any{ "shared_preload_libraries": "mandatory", }, "pg_hba": []string{}, @@ -358,18 +409,45 @@ func TestDynamicConfiguration(t *testing.T) { }, }, }, + { + name: "postgresql.parameters: shared_preload_libraries order", + input: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "shared_preload_libraries": "given, citus, more", + }, + }, + }, + params: postgres.Parameters{ + Mandatory: parameters(map[string]string{ + "shared_preload_libraries": "mandatory", + }), + }, + expected: map[string]any{ + "loop_wait": int32(10), + "ttl": int32(30), + "postgresql": map[string]any{ + "parameters": map[string]any{ + "shared_preload_libraries": "citus,mandatory,given, citus, more", + }, + "pg_hba": []string{}, + "use_pg_rewind": true, + "use_slots": false, + }, + }, + }, { name: "postgresql.pg_hba: wrong-type is ignored", - input: map[string]interface{}{ - "postgresql": map[string]interface{}{ + input: map[string]any{ + "postgresql": map[string]any{ "pg_hba": true, }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{}, + "postgresql": map[string]any{ + "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, @@ -378,8 +456,8 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "postgresql.pg_hba: default when no input", - input: map[string]interface{}{ - "postgresql": map[string]interface{}{ + input: map[string]any{ + "postgresql": map[string]any{ "pg_hba": nil, }, }, @@ -388,11 +466,11 @@ func TestDynamicConfiguration(t *testing.T) { *postgres.NewHBA().Local().Method("peer"), }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{}, + "postgresql": map[string]any{ + "parameters": map[string]any{}, "pg_hba": []string{ "local all all peer", }, @@ -403,9 +481,9 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "postgresql.pg_hba: no default when input", - input: map[string]interface{}{ - "postgresql": map[string]interface{}{ - "pg_hba": []interface{}{"custom"}, + input: map[string]any{ + "postgresql": map[string]any{ + "pg_hba": []any{"custom"}, }, }, hbas: postgres.HBAs{ @@ -413,11 +491,11 @@ func TestDynamicConfiguration(t *testing.T) { *postgres.NewHBA().Local().Method("peer"), }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{}, + "postgresql": map[string]any{ + "parameters": map[string]any{}, "pg_hba": []string{ "custom", }, @@ 
-428,9 +506,9 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "postgresql.pg_hba: mandatory before others", - input: map[string]interface{}{ - "postgresql": map[string]interface{}{ - "pg_hba": []interface{}{"custom"}, + input: map[string]any{ + "postgresql": map[string]any{ + "pg_hba": []any{"custom"}, }, }, hbas: postgres.HBAs{ @@ -438,11 +516,11 @@ func TestDynamicConfiguration(t *testing.T) { *postgres.NewHBA().Local().Method("peer"), }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{}, + "postgresql": map[string]any{ + "parameters": map[string]any{}, "pg_hba": []string{ "local all all peer", "custom", @@ -454,9 +532,9 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "postgresql.pg_hba: ignore non-string types", - input: map[string]interface{}{ - "postgresql": map[string]interface{}{ - "pg_hba": []interface{}{1, true, "custom", map[string]string{}, []string{}}, + input: map[string]any{ + "postgresql": map[string]any{ + "pg_hba": []any{1, true, "custom", map[string]string{}, []string{}}, }, }, hbas: postgres.HBAs{ @@ -464,11 +542,11 @@ func TestDynamicConfiguration(t *testing.T) { *postgres.NewHBA().Local().Method("peer"), }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{}, + "postgresql": map[string]any{ + "parameters": map[string]any{}, "pg_hba": []string{ "local all all peer", "custom", @@ -480,36 +558,37 @@ func TestDynamicConfiguration(t *testing.T) { }, { name: "standby_cluster: input passes through", - input: map[string]interface{}{ - "standby_cluster": map[string]interface{}{ + input: map[string]any{ + "standby_cluster": map[string]any{ "primary_slot_name": "str", }, }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{}, + "postgresql": map[string]any{ + "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, }, - "standby_cluster": map[string]interface{}{ + "standby_cluster": map[string]any{ "primary_slot_name": "str", }, }, }, { - name: "standby_cluster: spec overrides input", + name: "standby_cluster: repo only", cluster: &v1beta1.PostgresCluster{ Spec: v1beta1.PostgresClusterSpec{ Standby: &v1beta1.PostgresStandbySpec{ - Enabled: true, + Enabled: true, + RepoName: "repo", }, }, }, - input: map[string]interface{}{ - "standby_cluster": map[string]interface{}{ + input: map[string]any{ + "standby_cluster": map[string]any{ "restore_command": "overridden", "unrelated": "input", }, @@ -519,30 +598,148 @@ func TestDynamicConfiguration(t *testing.T) { "restore_command": "mandatory", }), }, - expected: map[string]interface{}{ + expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), - "postgresql": map[string]interface{}{ - "parameters": map[string]interface{}{ + "postgresql": map[string]any{ + "parameters": map[string]any{ "restore_command": "mandatory", }, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, }, - "standby_cluster": map[string]interface{}{ + "standby_cluster": map[string]any{ "create_replica_methods": []string{"pgbackrest"}, "restore_command": "mandatory", "unrelated": "input", }, }, }, + { + name: "standby_cluster: basebackup for streaming", + cluster: 
&v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Standby: &v1beta1.PostgresStandbySpec{ + Enabled: true, + Host: "0.0.0.0", + Port: initialize.Int32(5432), + }, + }, + }, + input: map[string]any{ + "standby_cluster": map[string]any{ + "host": "overridden", + "port": int32(0000), + "restore_command": "overridden", + "unrelated": "input", + }, + }, + params: postgres.Parameters{ + Mandatory: parameters(map[string]string{ + "restore_command": "mandatory", + }), + }, + expected: map[string]any{ + "loop_wait": int32(10), + "ttl": int32(30), + "postgresql": map[string]any{ + "parameters": map[string]any{ + "restore_command": "mandatory", + }, + "pg_hba": []string{}, + "use_pg_rewind": true, + "use_slots": false, + }, + "standby_cluster": map[string]any{ + "create_replica_methods": []string{"basebackup"}, + "host": "0.0.0.0", + "port": int32(5432), + "unrelated": "input", + }, + }, + }, + { + name: "standby_cluster: both repo and streaming", + cluster: &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Standby: &v1beta1.PostgresStandbySpec{ + Enabled: true, + Host: "0.0.0.0", + Port: initialize.Int32(5432), + RepoName: "repo", + }, + }, + }, + input: map[string]any{ + "standby_cluster": map[string]any{ + "host": "overridden", + "port": int32(9999), + "restore_command": "overridden", + "unrelated": "input", + }, + }, + params: postgres.Parameters{ + Mandatory: parameters(map[string]string{ + "restore_command": "mandatory", + }), + }, + expected: map[string]any{ + "loop_wait": int32(10), + "ttl": int32(30), + "postgresql": map[string]any{ + "parameters": map[string]any{ + "restore_command": "mandatory", + }, + "pg_hba": []string{}, + "use_pg_rewind": true, + "use_slots": false, + }, + "standby_cluster": map[string]any{ + "create_replica_methods": []string{"pgbackrest", "basebackup"}, + "host": "0.0.0.0", + "port": int32(5432), + "restore_command": "mandatory", + "unrelated": "input", + }, + }, + }, + { + name: "tde enabled", + cluster: &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "encryption_key_command": "echo test", + }, + }, + }, + }, + }, + }, + expected: map[string]any{ + "loop_wait": int32(10), + "ttl": int32(30), + "postgresql": map[string]any{ + "bin_name": map[string]any{"pg_rewind": string("/tmp/pg_rewind_tde.sh")}, + "parameters": map[string]any{}, + "pg_hba": []string{}, + "use_pg_rewind": bool(true), + "use_slots": bool(false), + }, + }, + }, } { t.Run(tt.name, func(t *testing.T) { cluster := tt.cluster if cluster == nil { cluster = new(v1beta1.PostgresCluster) } + if cluster.Spec.PostgresVersion == 0 { + cluster.Spec.PostgresVersion = 14 + } cluster.Default() actual := DynamicConfiguration(cluster, tt.input, tt.hbas, tt.params) assert.DeepEqual(t, tt.expected, actual) @@ -553,12 +750,12 @@ func TestDynamicConfiguration(t *testing.T) { func TestInstanceConfigFiles(t *testing.T) { t.Parallel() - cm1 := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm1"}} - cm2 := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm2"}} + cm1 := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm1"}} + cm2 := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm2"}} projections := instanceConfigFiles(cm1, cm2) - assert.Assert(t, marshalEquals(projections, strings.TrimSpace(` + assert.Assert(t, cmp.MarshalMatches(projections, ` - configMap: items: - key: patroni.yaml @@ -569,7 +766,7 @@ func 
TestInstanceConfigFiles(t *testing.T) { - key: patroni.yaml path: ~postgres-operator_instance.yaml name: cm2 - `)+"\n")) + `)) } func TestInstanceEnvironment(t *testing.T) { @@ -578,13 +775,13 @@ func TestInstanceEnvironment(t *testing.T) { cluster := new(v1beta1.PostgresCluster) cluster.Default() cluster.Spec.PostgresVersion = 12 - leaderService := new(v1.Service) - podService := new(v1.Service) + leaderService := new(corev1.Service) + podService := new(corev1.Service) podService.Name = "pod-dns" vars := instanceEnvironment(cluster, podService, leaderService, nil) - assert.Assert(t, marshalEquals(vars, strings.TrimSpace(` + assert.Assert(t, cmp.MarshalMatches(vars, ` - name: PATRONI_NAME valueFrom: fieldRef: @@ -612,19 +809,19 @@ func TestInstanceEnvironment(t *testing.T) { value: '*:8008' - name: PATRONICTL_CONFIG_FILE value: /etc/patroni - `)+"\n")) + `)) t.Run("MatchingPorts", func(t *testing.T) { - leaderService.Spec.Ports = []v1.ServicePort{{Name: "postgres"}} + leaderService.Spec.Ports = []corev1.ServicePort{{Name: "postgres"}} leaderService.Spec.Ports[0].TargetPort.StrVal = "postgres" - containers := []v1.Container{{Name: "okay"}} - containers[0].Ports = []v1.ContainerPort{{ - Name: "postgres", ContainerPort: 9999, Protocol: v1.ProtocolTCP, + containers := []corev1.Container{{Name: "okay"}} + containers[0].Ports = []corev1.ContainerPort{{ + Name: "postgres", ContainerPort: 9999, Protocol: corev1.ProtocolTCP, }} vars := instanceEnvironment(cluster, podService, leaderService, containers) - assert.Assert(t, marshalEquals(vars, strings.TrimSpace(` + assert.Assert(t, cmp.MarshalMatches(vars, ` - name: PATRONI_NAME valueFrom: fieldRef: @@ -654,7 +851,7 @@ func TestInstanceEnvironment(t *testing.T) { value: '*:8008' - name: PATRONICTL_CONFIG_FILE value: /etc/patroni - `)+"\n")) + `)) }) } @@ -716,20 +913,47 @@ postgresql: restapi: {} tags: {} `, "\t\n")+"\n") + + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "encryption_key_command": "echo test", + }, + }, + }, + } + + datawithTDE, err := instanceYAML(cluster, instance, nil) + assert.NilError(t, err) + assert.Equal(t, datawithTDE, strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. 
+bootstrap: + initdb: + - data-checksums + - encoding=UTF8 + - waldir=/pgdata/pg12_wal + - encryption-key-command=echo test + method: initdb +kubernetes: {} +postgresql: + basebackup: + - waldir=/pgdata/pg12_wal + create_replica_methods: + - basebackup + pgpass: /tmp/.pgpass + use_unix_socket: true +restapi: {} +tags: {} + `, "\t\n")+"\n") + } func TestPGBackRestCreateReplicaCommand(t *testing.T) { t.Parallel() - shellcheck, err := exec.LookPath("shellcheck") - if err != nil { - t.Skip(`requires "shellcheck" executable`) - } else { - output, err := exec.Command(shellcheck, "--version").CombinedOutput() - assert.NilError(t, err) - t.Logf("using %q:\n%s", shellcheck, output) - } - + shellcheck := require.ShellCheck(t) cluster := new(v1beta1.PostgresCluster) instance := new(v1beta1.PostgresInstanceSetSpec) @@ -751,7 +975,7 @@ func TestPGBackRestCreateReplicaCommand(t *testing.T) { { command := parsed.PostgreSQL.PGBackRest.Command file := filepath.Join(dir, "command.sh") - assert.NilError(t, ioutil.WriteFile(file, []byte(command), 0o600)) + assert.NilError(t, os.WriteFile(file, []byte(command), 0o600)) cmd := exec.Command(shellcheck, "--enable=all", "--shell=sh", file) output, err := cmd.CombinedOutput() @@ -773,7 +997,7 @@ func TestPGBackRestCreateReplicaCommand(t *testing.T) { // It should pass shellcheck. { file := filepath.Join(dir, "script.bash") - assert.NilError(t, ioutil.WriteFile(file, []byte(script), 0o600)) + assert.NilError(t, os.WriteFile(file, []byte(script), 0o600)) cmd := exec.Command(shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() @@ -789,7 +1013,7 @@ func TestProbeTiming(t *testing.T) { // Defaults should match the suggested/documented timing. // - https://github.com/zalando/patroni/blob/v2.0.1/docs/rest_api.rst - assert.DeepEqual(t, probeTiming(defaults), &v1.Probe{ + assert.DeepEqual(t, probeTiming(defaults), &corev1.Probe{ TimeoutSeconds: 5, PeriodSeconds: 10, SuccessThreshold: 1, @@ -798,12 +1022,12 @@ func TestProbeTiming(t *testing.T) { for _, tt := range []struct { lease, sync int32 - expected v1.Probe + expected corev1.Probe }{ // The smallest possible values for "loop_wait" and "retry_timeout" are // both 1 sec which makes 3 sec the smallest appropriate value for "ttl". // These are the validation minimums in v1beta1.PatroniSpec. - {lease: 3, sync: 1, expected: v1.Probe{ + {lease: 3, sync: 1, expected: corev1.Probe{ TimeoutSeconds: 1, PeriodSeconds: 1, SuccessThreshold: 1, @@ -811,13 +1035,13 @@ func TestProbeTiming(t *testing.T) { }}, // These are plausible values for "ttl" and "loop_wait". - {lease: 60, sync: 15, expected: v1.Probe{ + {lease: 60, sync: 15, expected: corev1.Probe{ TimeoutSeconds: 7, PeriodSeconds: 15, SuccessThreshold: 1, FailureThreshold: 4, }}, - {lease: 10, sync: 5, expected: v1.Probe{ + {lease: 10, sync: 5, expected: corev1.Probe{ TimeoutSeconds: 2, PeriodSeconds: 5, SuccessThreshold: 1, @@ -827,13 +1051,13 @@ func TestProbeTiming(t *testing.T) { // These are plausible values that aren't multiples of each other. 
// Failure triggers sooner than "ttl", which seems to agree with docs: // - https://github.com/zalando/patroni/blob/v2.0.1/docs/watchdog.rst - {lease: 19, sync: 7, expected: v1.Probe{ + {lease: 19, sync: 7, expected: corev1.Probe{ TimeoutSeconds: 3, PeriodSeconds: 7, SuccessThreshold: 1, FailureThreshold: 2, }}, - {lease: 13, sync: 7, expected: v1.Probe{ + {lease: 13, sync: 7, expected: corev1.Probe{ TimeoutSeconds: 3, PeriodSeconds: 7, SuccessThreshold: 1, @@ -841,19 +1065,20 @@ func TestProbeTiming(t *testing.T) { }}, // These values are infeasible for Patroni but produce valid v1.Probes. - {lease: 60, sync: 60, expected: v1.Probe{ + {lease: 60, sync: 60, expected: corev1.Probe{ TimeoutSeconds: 30, PeriodSeconds: 60, SuccessThreshold: 1, FailureThreshold: 1, }}, - {lease: 10, sync: 20, expected: v1.Probe{ + {lease: 10, sync: 20, expected: corev1.Probe{ TimeoutSeconds: 10, PeriodSeconds: 20, SuccessThreshold: 1, FailureThreshold: 1, }}, } { + tt := tt actual := probeTiming(&v1beta1.PatroniSpec{ LeaderLeaseDurationSeconds: &tt.lease, SyncPeriodSeconds: &tt.sync, diff --git a/internal/patroni/doc.go b/internal/patroni/doc.go index d571618c8b..500305406d 100644 --- a/internal/patroni/doc.go +++ b/internal/patroni/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package patroni provides clients, utilities and resources for configuring and // interacting with Patroni inside of a PostgreSQL cluster diff --git a/internal/patroni/helpers.go b/internal/patroni/helpers.go deleted file mode 100644 index 439de84fa8..0000000000 --- a/internal/patroni/helpers.go +++ /dev/null @@ -1,105 +0,0 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package patroni - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/util/sets" -) - -func findOrAppendContainer(containers *[]v1.Container, name string) *v1.Container { - for i := range *containers { - if (*containers)[i].Name == name { - return &(*containers)[i] - } - } - - *containers = append(*containers, v1.Container{Name: name}) - return &(*containers)[len(*containers)-1] -} - -func mergeEnvVars(from []v1.EnvVar, vars ...v1.EnvVar) []v1.EnvVar { - names := sets.NewString() - for i := range vars { - names.Insert(vars[i].Name) - } - - // Partition original slice by whether or not the name was passed in. 
- var existing, others []v1.EnvVar - for i := range from { - if names.Has(from[i].Name) { - existing = append(existing, from[i]) - } else { - others = append(others, from[i]) - } - } - - // When the new vars don't match, replace them. - if !equality.Semantic.DeepEqual(existing, vars) { - return append(others, vars...) - } - - return from -} - -func mergeVolumes(from []v1.Volume, vols ...v1.Volume) []v1.Volume { - names := sets.NewString() - for i := range vols { - names.Insert(vols[i].Name) - } - - // Partition original slice by whether or not the name was passed in. - var existing, others []v1.Volume - for i := range from { - if names.Has(from[i].Name) { - existing = append(existing, from[i]) - } else { - others = append(others, from[i]) - } - } - - // When the new vols don't match, replace them. - if !equality.Semantic.DeepEqual(existing, vols) { - return append(others, vols...) - } - - return from -} - -func mergeVolumeMounts(from []v1.VolumeMount, mounts ...v1.VolumeMount) []v1.VolumeMount { - names := sets.NewString() - for i := range mounts { - names.Insert(mounts[i].Name) - } - - // Partition original slice by whether or not the name was passed in. - var existing, others []v1.VolumeMount - for i := range from { - if names.Has(from[i].Name) { - existing = append(existing, from[i]) - } else { - others = append(others, from[i]) - } - } - - // When the new mounts don't match, replace them. - if !equality.Semantic.DeepEqual(existing, mounts) { - return append(others, mounts...) - } - - return from -} diff --git a/internal/patroni/rbac.go b/internal/patroni/rbac.go index 56f0af2575..dcf3f18cea 100644 --- a/internal/patroni/rbac.go +++ b/internal/patroni/rbac.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -23,25 +12,25 @@ import ( ) // "list", "patch", and "watch" are required. Include "get" for good measure. -// +kubebuilder:rbac:namespace=patroni,groups="",resources=pods,verbs=get -// +kubebuilder:rbac:namespace=patroni,groups="",resources=pods,verbs=list;watch -// +kubebuilder:rbac:namespace=patroni,groups="",resources=pods,verbs=patch +// +kubebuilder:rbac:groups="",resources="pods",verbs={get} +// +kubebuilder:rbac:groups="",resources="pods",verbs={list,watch} +// +kubebuilder:rbac:groups="",resources="pods",verbs={patch} // TODO(cbandy): Separate these so that one can choose ConfigMap over Endpoints. // When using Endpoints for DCS, "create", "list", "patch", and "watch" are // required. Include "get" for good measure. The `patronictl scaffold` and // `patronictl remove` commands require "deletecollection". 
-// +kubebuilder:rbac:namespace=patroni,groups="",resources=endpoints,verbs=get -// +kubebuilder:rbac:namespace=patroni,groups="",resources=endpoints,verbs=create;deletecollection -// +kubebuilder:rbac:namespace=patroni,groups="",resources=endpoints,verbs=list;watch -// +kubebuilder:rbac:namespace=patroni,groups="",resources=endpoints,verbs=patch -// +kubebuilder:rbac:namespace=patroni,groups="",resources=services,verbs=create +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={get} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={create,deletecollection} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={list,watch} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={patch} +// +kubebuilder:rbac:groups="",resources="services",verbs={create} // The OpenShift RestrictedEndpointsAdmission plugin requires special // authorization to create Endpoints that contain Pod IPs. // - https://github.com/openshift/origin/pull/9383 -// +kubebuilder:rbac:namespace=patroni,groups="",resources=endpoints/restricted,verbs=create +// +kubebuilder:rbac:groups="",resources="endpoints/restricted",verbs={create} // Permissions returns the RBAC rules Patroni needs for cluster. func Permissions(cluster *v1beta1.PostgresCluster) []rbacv1.PolicyRule { diff --git a/internal/patroni/rbac_test.go b/internal/patroni/rbac_test.go index e6c426d1bd..39a8dff245 100644 --- a/internal/patroni/rbac_test.go +++ b/internal/patroni/rbac_test.go @@ -1,26 +1,15 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni import ( - "strings" "testing" "gotest.tools/v3/assert" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -49,7 +38,7 @@ func TestPermissions(t *testing.T) { assert.Assert(t, isUniqueAndSorted(rule.Verbs), "got %q", rule.Verbs) } - assert.Assert(t, marshalEquals(permissions, strings.Trim(` + assert.Assert(t, cmp.MarshalMatches(permissions, ` - apiGroups: - "" resources: @@ -76,7 +65,7 @@ func TestPermissions(t *testing.T) { - services verbs: - create - `, "\t\n")+"\n")) + `)) }) t.Run("OpenShift", func(t *testing.T) { @@ -90,7 +79,7 @@ func TestPermissions(t *testing.T) { assert.Assert(t, isUniqueAndSorted(rule.Verbs), "got %q", rule.Verbs) } - assert.Assert(t, marshalEquals(permissions, strings.Trim(` + assert.Assert(t, cmp.MarshalMatches(permissions, ` - apiGroups: - "" resources: @@ -123,6 +112,6 @@ func TestPermissions(t *testing.T) { - services verbs: - create - `, "\t\n")+"\n")) + `)) }) } diff --git a/internal/patroni/reconcile.go b/internal/patroni/reconcile.go index 024cc62df9..4fbb08b67d 100644 --- a/internal/patroni/reconcile.go +++ b/internal/patroni/reconcile.go @@ -1,30 +1,17 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. 
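The kubebuilder markers above switch to the braced verb syntax and drop the `namespace=patroni` qualifier, while `Permissions` continues to return `rbacv1.PolicyRule` values that the tests render as YAML. A hedged sketch of composing one such rule for the Endpoints permissions listed in the markers (the actual function likely builds several rules and varies them for OpenShift):

```go
package example

import rbacv1 "k8s.io/api/rbac/v1"

// endpointsRule sketches a single PolicyRule matching the markers above:
// Patroni needs create, deletecollection, get, list, patch, and watch on
// Endpoints in the core ("") API group when Endpoints back the DCS.
func endpointsRule() rbacv1.PolicyRule {
	return rbacv1.PolicyRule{
		APIGroups: []string{""},
		Resources: []string{"endpoints"},
		Verbs:     []string{"create", "deletecollection", "get", "list", "patch", "watch"},
	}
}
```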
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni import ( "context" - "fmt" "strings" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pgbackrest" @@ -36,8 +23,7 @@ import ( // ClusterBootstrapped returns a bool indicating whether or not Patroni has successfully // bootstrapped the PostgresCluster func ClusterBootstrapped(postgresCluster *v1beta1.PostgresCluster) bool { - return (postgresCluster.Status.Patroni != nil && - postgresCluster.Status.Patroni.SystemIdentifier != "") + return postgresCluster.Status.Patroni.SystemIdentifier != "" } // ClusterConfigMap populates the shared ConfigMap with fields needed to run Patroni. @@ -45,11 +31,11 @@ func ClusterConfigMap(ctx context.Context, inCluster *v1beta1.PostgresCluster, inHBAs postgres.HBAs, inParameters postgres.Parameters, - outClusterConfigMap *v1.ConfigMap, + outClusterConfigMap *corev1.ConfigMap, ) error { var err error - initialize.StringMap(&outClusterConfigMap.Data) + initialize.Map(&outClusterConfigMap.Data) outClusterConfigMap.Data[configMapFileKey], err = clusterYAML(inCluster, inHBAs, inParameters) @@ -61,11 +47,11 @@ func ClusterConfigMap(ctx context.Context, func InstanceConfigMap(ctx context.Context, inCluster *v1beta1.PostgresCluster, inInstanceSpec *v1beta1.PostgresInstanceSetSpec, - outInstanceConfigMap *v1.ConfigMap, + outInstanceConfigMap *corev1.ConfigMap, ) error { var err error - initialize.StringMap(&outInstanceConfigMap.Data) + initialize.Map(&outInstanceConfigMap.Data) command := pgbackrest.ReplicaCreateCommand(inCluster, inInstanceSpec) @@ -77,33 +63,32 @@ func InstanceConfigMap(ctx context.Context, // InstanceCertificates populates the shared Secret with certificates needed to run Patroni. func InstanceCertificates(ctx context.Context, - inRoot *pki.Certificate, inDNS *pki.Certificate, - inDNSKey *pki.PrivateKey, outInstanceCertificates *v1.Secret, + inRoot pki.Certificate, inDNS pki.Certificate, + inDNSKey pki.PrivateKey, outInstanceCertificates *corev1.Secret, ) error { - initialize.ByteMap(&outInstanceCertificates.Data) + initialize.Map(&outInstanceCertificates.Data) var err error - outInstanceCertificates.Data[certAuthorityFileKey], err = - certAuthorities(inRoot) + outInstanceCertificates.Data[certAuthorityFileKey], err = certFile(inRoot) if err == nil { - outInstanceCertificates.Data[certServerFileKey], err = - certFile(inDNSKey, inDNS) + outInstanceCertificates.Data[certServerFileKey], err = certFile(inDNSKey, inDNS) } return err } // InstancePod populates a PodTemplateSpec with the fields needed to run Patroni. +// The database container must already be in the template. 
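The new precondition above ("the database container must already be in the template") means callers prepare the Pod template before invoking InstancePod; the updated function looks the container up by name rather than appending one. A minimal sketch of what a caller might pass, mirroring the test further down where the container name "database" matches naming.ContainerDatabase; this is illustrative, not the operator's reconciler code.

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        template := new(corev1.PodTemplateSpec)

        // The caller, not InstancePod, is responsible for creating this container.
        template.Spec.Containers = []corev1.Container{{Name: "database"}}

        // The template would then be passed to InstancePod as outInstancePod.
        fmt.Println(template.Spec.Containers[0].Name)
    }
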
func InstancePod(ctx context.Context, inCluster *v1beta1.PostgresCluster, - inClusterConfigMap *v1.ConfigMap, - inClusterPodService *v1.Service, - inPatroniLeaderService *v1.Service, + inClusterConfigMap *corev1.ConfigMap, + inClusterPodService *corev1.Service, + inPatroniLeaderService *corev1.Service, inInstanceSpec *v1beta1.PostgresInstanceSetSpec, - inInstanceCertificates *v1.Secret, - inInstanceConfigMap *v1.ConfigMap, - outInstancePod *v1.PodTemplateSpec, + inInstanceCertificates *corev1.Secret, + inInstanceConfigMap *corev1.ConfigMap, + outInstancePod *corev1.PodTemplateSpec, ) error { initialize.Labels(outInstancePod) @@ -112,30 +97,32 @@ func InstancePod(ctx context.Context, // "kubernetes.labels" settings. outInstancePod.Labels[naming.LabelPatroni] = naming.PatroniScope(inCluster) - container := findOrAppendContainer(&outInstancePod.Spec.Containers, - naming.ContainerDatabase) + var container *corev1.Container + for i := range outInstancePod.Spec.Containers { + if outInstancePod.Spec.Containers[i].Name == naming.ContainerDatabase { + container = &outInstancePod.Spec.Containers[i] + } + } container.Command = []string{"patroni", configDirectory} - container.Env = mergeEnvVars(container.Env, + container.Env = append(container.Env, instanceEnvironment(inCluster, inClusterPodService, inPatroniLeaderService, outInstancePod.Spec.Containers)...) - volume := v1.Volume{Name: "patroni-config"} - volume.Projected = new(v1.ProjectedVolumeSource) + volume := corev1.Volume{Name: "patroni-config"} + volume.Projected = new(corev1.ProjectedVolumeSource) // Add our projections after those specified in the CR. Items later in the // list take precedence over earlier items (that is, last write wins). // - https://kubernetes.io/docs/concepts/storage/volumes/#projected - volume.Projected.Sources = append(append(append( - // TODO(cbandy): User config will come from the spec. - volume.Projected.Sources, []v1.VolumeProjection(nil)...), + volume.Projected.Sources = append(append(volume.Projected.Sources, instanceConfigFiles(inClusterConfigMap, inInstanceConfigMap)...), instanceCertificates(inInstanceCertificates)...) - outInstancePod.Spec.Volumes = mergeVolumes(outInstancePod.Spec.Volumes, volume) + outInstancePod.Spec.Volumes = append(outInstancePod.Spec.Volumes, volume) - container.VolumeMounts = mergeVolumeMounts(container.VolumeMounts, v1.VolumeMount{ + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ Name: volume.Name, MountPath: configDirectory, ReadOnly: true, @@ -143,75 +130,11 @@ func InstancePod(ctx context.Context, instanceProbes(inCluster, container) - // Create the sidecar container that handles certificate copying and permission - // setting and the patronictl reload. Use the existing cluster, pod, volume name - // and container env as these are needed for the functions listed. - diffCopyReplicationTLS(inCluster, outInstancePod, volume.Name, container.Env) - return nil } -// diffCopyReplicationTLS, similar to InitCopyReplicationTLS, creates a sidecar -// container that copies the mounted client certificate, key and CA certificate -// files from the /pgconf/tls/replication directory to the /tmp/replication -// directory in order to set proper file permissions. However, this function -// involves a continual loop that checks for changes to the relevant directory -// rather than acting during initialization. 
As during initialization, this is -// required because the group permission settings applied via the defaultMode -// option are not honored as expected, resulting in incorrect group read -// permissions. -// See https://github.com/kubernetes/kubernetes/issues/57923 -// TODO(tjmoore4): remove this implementation when/if defaultMode permissions are set as -// expected for the mounted volume. -func diffCopyReplicationTLS(postgresCluster *v1beta1.PostgresCluster, - template *v1.PodTemplateSpec, volumeName string, envVar []v1.EnvVar) { - container := findOrAppendContainer(&template.Spec.Containers, - naming.ContainerClientCertCopy) - - container.Command = copyReplicationCerts(naming.PatroniScope(postgresCluster)) - container.Image = config.PostgresContainerImage(postgresCluster) - - container.VolumeMounts = mergeVolumeMounts(container.VolumeMounts, v1.VolumeMount{ - Name: volumeName, - MountPath: configDirectory, - ReadOnly: true, - }) - - container.SecurityContext = initialize.RestrictedSecurityContext() - - container.Env = envVar -} - -// copyReplicationCerts copies the replication certificates and key from the -// mounted directory to 'tmp', sets the proper permissions, and performs a -// Patroni reload whenever a change in the directory is detected -// TODO(tjmoore4): The use of 'patronictl reload' can likely be replaced -// with a signal. This may allow for removing the loaded Patroni config -// from the sidecar. -func copyReplicationCerts(patroniScope string) []string { - script := fmt.Sprintf(` -declare -r mountDir=%s -declare -r tmpDir=%s -while sleep 5s; do - mkdir -p %s - DIFF=$(diff ${mountDir} ${tmpDir}) - if [ "$DIFF" != "" ] - then - date - echo Copying replication certificates and key and setting permissions - install -m 0600 ${mountDir}/{%s,%s,%s} ${tmpDir} - patronictl reload %s --force - fi -done -`, naming.CertMountPath+naming.ReplicationDirectory, naming.ReplicationTmp, - naming.ReplicationTmp, naming.ReplicationCert, - naming.ReplicationPrivateKey, naming.ReplicationCACert, patroniScope) - - return []string{"bash", "-c", script} -} - // instanceProbes adds Patroni liveness and readiness probes to container. -func instanceProbes(cluster *v1beta1.PostgresCluster, container *v1.Container) { +func instanceProbes(cluster *v1beta1.PostgresCluster, container *corev1.Container) { // Patroni uses a watchdog to ensure that PostgreSQL does not accept commits // after the leader lock expires, even if Patroni becomes unresponsive. @@ -227,10 +150,10 @@ func instanceProbes(cluster *v1beta1.PostgresCluster, container *v1.Container) { // TODO(cbandy): Consider if a PreStop hook is necessary. container.LivenessProbe = probeTiming(cluster.Spec.Patroni) container.LivenessProbe.InitialDelaySeconds = 3 - container.LivenessProbe.HTTPGet = &v1.HTTPGetAction{ + container.LivenessProbe.HTTPGet = &corev1.HTTPGetAction{ Path: "/liveness", Port: intstr.FromInt(int(*cluster.Spec.Patroni.Port)), - Scheme: v1.URISchemeHTTPS, + Scheme: corev1.URISchemeHTTPS, } // Readiness is reflected in the controlling object's status (e.g. ReadyReplicas) @@ -240,11 +163,29 @@ func instanceProbes(cluster *v1beta1.PostgresCluster, container *v1.Container) { // of the leader Pod in the leader Service. 
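The PodIsPrimary, PodIsStandbyLeader, and PodRequiresRestart helpers added later in this hunk all read the Patroni "status" Pod annotation, which (when Kubernetes is used for DCS) holds a JSON document with fields such as "role" and "pending_restart". A rough standalone sketch of that check, using an annotation value shaped like the fixtures in the tests below; the substring match mirrors the approach in this hunk rather than full JSON decoding.

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Example annotation value, shaped like the test fixtures in this patch.
        status := `{"role":"master","pending_restart":true}`

        isPrimary := strings.Contains(status, `"role":"master"`)
        needsRestart := strings.Contains(status, `"pending_restart":true`)

        fmt.Println(isPrimary, needsRestart) // true true
    }
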
container.ReadinessProbe = probeTiming(cluster.Spec.Patroni) container.ReadinessProbe.InitialDelaySeconds = 3 - container.ReadinessProbe.HTTPGet = &v1.HTTPGetAction{ + container.ReadinessProbe.HTTPGet = &corev1.HTTPGetAction{ Path: "/readiness", Port: intstr.FromInt(int(*cluster.Spec.Patroni.Port)), - Scheme: v1.URISchemeHTTPS, + Scheme: corev1.URISchemeHTTPS, + } +} + +// PodIsPrimary returns whether or not pod is currently acting as the leader with +// the "master" role. This role will be called "primary" in the future, see: +// - https://github.com/zalando/patroni/blob/master/docs/releases.rst?plain=1#L213 +func PodIsPrimary(pod metav1.Object) bool { + if pod == nil { + return false } + + // TODO(cbandy): This works only when using Kubernetes for DCS. + + // - https://github.com/zalando/patroni/blob/v3.1.1/patroni/ha.py#L296 + // - https://github.com/zalando/patroni/blob/v3.1.1/patroni/ha.py#L583 + // - https://github.com/zalando/patroni/blob/v3.1.1/patroni/ha.py#L782 + // - https://github.com/zalando/patroni/blob/v3.1.1/patroni/ha.py#L1574 + status := pod.GetAnnotations()["status"] + return strings.Contains(status, `"role":"master"`) } // PodIsStandbyLeader returns whether or not pod is currently acting as a "standby_leader". @@ -253,9 +194,27 @@ func PodIsStandbyLeader(pod metav1.Object) bool { return false } + // TODO(cbandy): This works only when using Kubernetes for DCS. + // - https://github.com/zalando/patroni/blob/v2.0.2/patroni/ha.py#L190 // - https://github.com/zalando/patroni/blob/v2.0.2/patroni/ha.py#L294 // - https://github.com/zalando/patroni/blob/v2.0.2/patroni/ha.py#L353 status := pod.GetAnnotations()["status"] return strings.Contains(status, `"role":"standby_leader"`) } + +// PodRequiresRestart returns whether or not PostgreSQL inside pod has (pending) +// parameter changes that require a PostgreSQL restart. +func PodRequiresRestart(pod metav1.Object) bool { + if pod == nil { + return false + } + + // TODO(cbandy): This works only when using Kubernetes for DCS. + + // - https://github.com/zalando/patroni/blob/v2.1.1/patroni/ha.py#L198 + // - https://github.com/zalando/patroni/blob/v2.1.1/patroni/postgresql/config.py#L977 + // - https://github.com/zalando/patroni/blob/v2.1.1/patroni/postgresql/config.py#L1007 + status := pod.GetAnnotations()["status"] + return strings.Contains(status, `"pending_restart":true`) +} diff --git a/internal/patroni/reconcile_test.go b/internal/patroni/reconcile_test.go index 9fcf06bc1f..5d2a2c0ad5 100644 --- a/internal/patroni/reconcile_test.go +++ b/internal/patroni/reconcile_test.go @@ -1,32 +1,21 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package patroni import ( "context" - "strings" "testing" "gotest.tools/v3/assert" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -39,7 +28,7 @@ func TestClusterConfigMap(t *testing.T) { pgParameters := postgres.Parameters{} cluster.Default() - config := new(v1.ConfigMap) + config := new(corev1.ConfigMap) assert.NilError(t, ClusterConfigMap(ctx, cluster, pgHBAs, pgParameters, config)) // The output of clusterYAML should go into config. @@ -55,28 +44,50 @@ func TestClusterConfigMap(t *testing.T) { func TestReconcileInstanceCertificates(t *testing.T) { t.Parallel() - root := pki.NewRootCertificateAuthority() - assert.NilError(t, root.Generate(), "bug in test") - - leaf := pki.NewLeafCertificate("any", nil, nil) - assert.NilError(t, leaf.Generate(root), "bug in test") + root, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err, "bug in test") + + leaf, err := root.GenerateLeafCertificate("any", nil) + assert.NilError(t, err, "bug in test") + + dataCA, _ := certFile(root.Certificate) + assert.Assert(t, + cmp.Regexp(`^`+ + `-----BEGIN CERTIFICATE-----\n`+ + `([^-]+\n)+`+ + `-----END CERTIFICATE-----\n`+ + `$`, string(dataCA), + ), + "expected a PEM-encoded certificate bundle") + + dataCert, _ := certFile(leaf.PrivateKey, leaf.Certificate) + assert.Assert(t, + cmp.Regexp(`^`+ + `-----BEGIN [^ ]+ PRIVATE KEY-----\n`+ + `([^-]+\n)+`+ + `-----END [^ ]+ PRIVATE KEY-----\n`+ + `-----BEGIN CERTIFICATE-----\n`+ + `([^-]+\n)+`+ + `-----END CERTIFICATE-----\n`+ + `$`, string(dataCert), + ), + // - https://docs.python.org/3/library/ssl.html#combined-key-and-certificate + // - https://docs.python.org/3/library/ssl.html#certificate-chains + "expected a PEM-encoded key followed by the certificate") ctx := context.Background() - secret := new(v1.Secret) - cert := leaf.Certificate - key := leaf.PrivateKey + secret := new(corev1.Secret) - dataCA, _ := certAuthorities(root.Certificate) - dataCert, _ := certFile(key, cert) - - assert.NilError(t, InstanceCertificates(ctx, root.Certificate, cert, key, secret)) + assert.NilError(t, InstanceCertificates(ctx, + root.Certificate, leaf.Certificate, leaf.PrivateKey, secret)) assert.DeepEqual(t, secret.Data["patroni.ca-roots"], dataCA) assert.DeepEqual(t, secret.Data["patroni.crt-combined"], dataCert) // No change when called again. 
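The regular expressions above assert the on-disk layout of the two Secret keys: a CA bundle that is one or more PEM certificates, and a combined file that is the private key immediately followed by the certificate (the layout Python's ssl module expects for a combined key and certificate). A small sketch of that concatenation using only the Go standard library; the block contents here are placeholders, not real key material.

    package main

    import (
        "encoding/pem"
        "fmt"
    )

    func main() {
        // Placeholder DER bytes; real callers encode an actual key and certificate.
        key := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: []byte{0x30}})
        cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: []byte{0x30}})

        // The combined file is simply the PEM-encoded key followed by the certificate.
        combined := append(append([]byte{}, key...), cert...)
        fmt.Print(string(combined))
    }
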
before := secret.DeepCopy() - assert.NilError(t, InstanceCertificates(ctx, root.Certificate, cert, key, secret)) + assert.NilError(t, InstanceCertificates(ctx, + root.Certificate, leaf.Certificate, leaf.PrivateKey, secret)) assert.DeepEqual(t, secret, before) } @@ -86,7 +97,7 @@ func TestInstanceConfigMap(t *testing.T) { ctx := context.Background() cluster := new(v1beta1.PostgresCluster) instance := new(v1beta1.PostgresInstanceSetSpec) - config := new(v1.ConfigMap) + config := new(corev1.ConfigMap) data, _ := instanceYAML(cluster, instance, nil) assert.NilError(t, InstanceConfigMap(ctx, cluster, instance, config)) @@ -106,18 +117,21 @@ func TestInstancePod(t *testing.T) { cluster.Default() cluster.Name = "some-such" cluster.Spec.PostgresVersion = 11 - clusterConfigMap := new(v1.ConfigMap) - clusterPodService := new(v1.Service) - instanceCertficates := new(v1.Secret) - instanceConfigMap := new(v1.ConfigMap) + cluster.Spec.Image = "image" + cluster.Spec.ImagePullPolicy = corev1.PullAlways + clusterConfigMap := new(corev1.ConfigMap) + clusterPodService := new(corev1.Service) + instanceCertificates := new(corev1.Secret) + instanceConfigMap := new(corev1.ConfigMap) instanceSpec := new(v1beta1.PostgresInstanceSetSpec) - patroniLeaderService := new(v1.Service) - template := new(v1.PodTemplateSpec) + patroniLeaderService := new(corev1.Service) + template := new(corev1.PodTemplateSpec) + template.Spec.Containers = []corev1.Container{{Name: "database"}} call := func() error { return InstancePod(context.Background(), cluster, clusterConfigMap, clusterPodService, patroniLeaderService, - instanceSpec, instanceCertficates, instanceConfigMap, template) + instanceSpec, instanceCertificates, instanceConfigMap, template) } assert.NilError(t, call()) @@ -126,7 +140,7 @@ func TestInstancePod(t *testing.T) { Labels: map[string]string{naming.LabelPatroni: "some-such-ha"}, }) - assert.Assert(t, marshalEquals(template.Spec, strings.TrimSpace(` + assert.Assert(t, cmp.MarshalMatches(template.Spec, ` containers: - command: - patroni @@ -185,63 +199,6 @@ containers: - mountPath: /etc/patroni name: patroni-config readOnly: true -- command: - - bash - - -c - - |2 - - declare -r mountDir=/pgconf/tls/replication - declare -r tmpDir=/tmp/replication - while sleep 5s; do - mkdir -p /tmp/replication - DIFF=$(diff ${mountDir} ${tmpDir}) - if [ "$DIFF" != "" ] - then - date - echo Copying replication certificates and key and setting permissions - install -m 0600 ${mountDir}/{tls.crt,tls.key,ca.crt} ${tmpDir} - patronictl reload some-such-ha --force - fi - done - env: - - name: PATRONI_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - - name: PATRONI_KUBERNETES_POD_IP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP - - name: PATRONI_KUBERNETES_PORTS - value: | - [] - - name: PATRONI_POSTGRESQL_CONNECT_ADDRESS - value: $(PATRONI_NAME).:5432 - - name: PATRONI_POSTGRESQL_LISTEN - value: '*:5432' - - name: PATRONI_POSTGRESQL_CONFIG_DIR - value: /pgdata/pg11 - - name: PATRONI_POSTGRESQL_DATA_DIR - value: /pgdata/pg11 - - name: PATRONI_RESTAPI_CONNECT_ADDRESS - value: $(PATRONI_NAME).:8008 - - name: PATRONI_RESTAPI_LISTEN - value: '*:8008' - - name: PATRONICTL_CONFIG_FILE - value: /etc/patroni - name: replication-cert-copy - resources: {} - securityContext: - allowPrivilegeEscalation: false - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: true - volumeMounts: - - mountPath: /etc/patroni - name: patroni-config - readOnly: true volumes: - name: patroni-config projected: 
@@ -260,132 +217,32 @@ volumes: path: ~postgres-operator/patroni.ca-roots - key: patroni.crt-combined path: ~postgres-operator/patroni.crt+key -`)+"\n")) + `)) +} - // No change when called again. - before := template.DeepCopy() - assert.NilError(t, call()) - assert.DeepEqual(t, template, before) - - t.Run("ExistingEnvironment", func(t *testing.T) { - // test the env changes are made to both the database - // and sidecar container as the sidecar env vars will be - // updated to match - for i := range template.Spec.Containers { - template.Spec.Containers[i].Env = []v1.EnvVar{ - {Name: "existed"}, - {Name: "PATRONI_KUBERNETES_POD_IP"}, - {Name: "also", Value: "kept"}, - } - - assert.NilError(t, call()) - - // Correct values are there and in order. - assert.Assert(t, marshalContains(template.Spec.Containers[i].Env, - strings.TrimSpace(` -- name: PATRONI_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name -- name: PATRONI_KUBERNETES_POD_IP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP - `)+"\n")) - - // Existing values are there and in the original order. - assert.Assert(t, marshalContains(template.Spec.Containers[i].Env, - strings.TrimSpace(` -- name: existed -- name: also - value: kept - `)+"\n")) - - // Correct values can be in the middle somewhere. - // Use a merge so a duplicate is not added. - template.Spec.Containers[i].Env = mergeEnvVars(template.Spec.Containers[i].Env, - v1.EnvVar{Name: "at", Value: "end"}) - } - // No change when already correct. - before := template.DeepCopy() - assert.NilError(t, call()) - assert.DeepEqual(t, template, before) - }) +func TestPodIsPrimary(t *testing.T) { + // No object + assert.Assert(t, !PodIsPrimary(nil)) - t.Run("ExistingVolumes", func(t *testing.T) { - template.Spec.Volumes = []v1.Volume{ - {Name: "existing"}, - {Name: "patroni-config", VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{Medium: "Memory"}, - }}, - } + // No annotations + pod := &corev1.Pod{} + assert.Assert(t, !PodIsPrimary(pod)) - assert.NilError(t, call()) + // No role + pod.Annotations = map[string]string{"status": `{}`} + assert.Assert(t, !PodIsPrimary(pod)) - // Correct values are there. - assert.Assert(t, marshalContains(template.Spec.Volumes, - strings.TrimSpace(` -- name: patroni-config - projected: - sources: - - configMap: - items: - - key: patroni.yaml - `)+"\n")) - - // Existing values are there. - assert.Assert(t, marshalContains(template.Spec.Volumes, - strings.TrimSpace(` -- name: existing - `)+"\n")) - - // Correct values can be in the middle somewhere. - template.Spec.Volumes = append(template.Spec.Volumes, - v1.Volume{Name: "later"}) - - // No change when already correct. - before := template.DeepCopy() - assert.NilError(t, call()) - assert.DeepEqual(t, template, before) - }) + // Replica + pod.Annotations["status"] = `{"role":"replica"}` + assert.Assert(t, !PodIsPrimary(pod)) - t.Run("ExistingVolumeMounts", func(t *testing.T) { - // run the volume mount tests for all containers in pod - for i := range template.Spec.Containers { - template.Spec.Containers[i].VolumeMounts = []v1.VolumeMount{ - {Name: "existing", MountPath: "mount"}, - {Name: "patroni-config", MountPath: "wrong"}, - } - - assert.NilError(t, call()) - - // Correct values are there. - assert.Assert(t, marshalContains(template.Spec.Containers[i].VolumeMounts, - strings.TrimSpace(` -- mountPath: /etc/patroni - name: patroni-config - readOnly: true - `)+"\n")) - - // Existing values are there. 
- assert.Assert(t, marshalContains(template.Spec.Containers[i].VolumeMounts, - strings.TrimSpace(` -- mountPath: mount - name: existing - `)+"\n")) - - // Correct values can be in the middle somewhere. - template.Spec.Containers[i].VolumeMounts = append( - template.Spec.Containers[i].VolumeMounts, v1.VolumeMount{Name: "later"}) - } - - // No change when already correct. - before := template.DeepCopy() - assert.NilError(t, call()) - assert.DeepEqual(t, template, before) - }) + // Standby leader + pod.Annotations["status"] = `{"role":"standby_leader"}` + assert.Assert(t, !PodIsPrimary(pod)) + + // Primary + pod.Annotations["status"] = `{"role":"master"}` + assert.Assert(t, PodIsPrimary(pod)) } func TestPodIsStandbyLeader(t *testing.T) { @@ -393,7 +250,7 @@ func TestPodIsStandbyLeader(t *testing.T) { assert.Assert(t, !PodIsStandbyLeader(nil)) // No annotations - pod := &v1.Pod{} + pod := &corev1.Pod{} assert.Assert(t, !PodIsStandbyLeader(pod)) // No role @@ -412,3 +269,24 @@ func TestPodIsStandbyLeader(t *testing.T) { pod.Annotations["status"] = `{"role":"standby_leader"}` assert.Assert(t, PodIsStandbyLeader(pod)) } + +func TestPodRequiresRestart(t *testing.T) { + // No object + assert.Assert(t, !PodRequiresRestart(nil)) + + // No annotations + pod := &corev1.Pod{} + assert.Assert(t, !PodRequiresRestart(pod)) + + // Normal; no flag + pod.Annotations = map[string]string{"status": `{}`} + assert.Assert(t, !PodRequiresRestart(pod)) + + // Unexpected value + pod.Annotations["status"] = `{"pending_restart":"mystery"}` + assert.Assert(t, !PodRequiresRestart(pod)) + + // Expected value + pod.Annotations["status"] = `{"pending_restart":true}` + assert.Assert(t, PodRequiresRestart(pod)) +} diff --git a/internal/pgadmin/config.go b/internal/pgadmin/config.go new file mode 100644 index 0000000000..553a90f656 --- /dev/null +++ b/internal/pgadmin/config.go @@ -0,0 +1,173 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgadmin + +import ( + "strings" + + corev1 "k8s.io/api/core/v1" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +const ( + // tmp volume to hold the nss_wrapper, process and socket files + // both the '/tmp' mount path and '/etc/httpd/run' mount path + // mount the 'tmp' volume + tmpVolume = "tmp" + + // runMountPath holds the pgAdmin run path, which mounts the 'tmp' volume + runMountPath = "/etc/httpd/run" + + // log volume and path where the pgadmin4.log is located + logVolume = "pgadmin-log" + logMountPath = "/var/log/pgadmin" + + // data volume and path to hold persistent pgAdmin data + dataVolume = "pgadmin-data" + dataMountPath = "/var/lib/pgadmin" + + // ldapPasswordPath is the path for mounting the LDAP Bind Password + ldapPasswordPath = "~postgres-operator/ldap-bind-password" /* #nosec */ + ldapPasswordAbsolutePath = configMountPath + "/" + ldapPasswordPath + + // TODO(tjmoore4): The login and password implementation will be updated in + // upcoming enhancement work. + + // initial pgAdmin login email address + loginEmail = "admin" + + // initial pgAdmin login password + loginPassword = "admin" + + // default pgAdmin port + pgAdminPort = 5050 + + // configMountPath is where to mount configuration files, secrets, etc. 
+ configMountPath = "/etc/pgadmin/conf.d" + + settingsAbsolutePath = configMountPath + "/" + settingsProjectionPath + settingsConfigMapKey = "pgadmin-settings.json" + settingsProjectionPath = "~postgres-operator/pgadmin.json" + + // startupMountPath is where to mount a temporary directory that is only + // writable during Pod initialization. + // + // NOTE: No ConfigMap nor Secret should ever be mounted here because they + // could be used to inject code through "config_system.py". + startupMountPath = "/etc/pgadmin" + + // configSystemAbsolutePath is imported by pgAdmin after all other config files. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/docs/en_US/config_py.rst + configSystemAbsolutePath = startupMountPath + "/config_system.py" +) + +// podConfigFiles returns projections of pgAdmin's configuration files to +// include in the configuration volume. +func podConfigFiles(configmap *corev1.ConfigMap, spec v1beta1.PGAdminPodSpec) []corev1.VolumeProjection { + config := append(append([]corev1.VolumeProjection{}, spec.Config.Files...), + []corev1.VolumeProjection{ + { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configmap.Name, + }, + Items: []corev1.KeyToPath{ + { + Key: settingsConfigMapKey, + Path: settingsProjectionPath, + }, + }, + }, + }, + }...) + + // To enable LDAP authentication for pgAdmin, various LDAP settings must be configured. + // While most of the required configuration can be set using the 'settings' + // feature on the spec (.Spec.UserInterface.PGAdmin.Config.Settings), those + // values are stored in a ConfigMap in plaintext. + // As a special case, here we mount a provided Secret containing the LDAP_BIND_PASSWORD + // for use with the other pgAdmin LDAP configuration. + // - https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html + // - https://www.pgadmin.org/docs/pgadmin4/development/enabling_ldap_authentication.html + if spec.Config.LDAPBindPassword != nil { + config = append(config, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: spec.Config.LDAPBindPassword.LocalObjectReference, + Optional: spec.Config.LDAPBindPassword.Optional, + Items: []corev1.KeyToPath{ + { + Key: spec.Config.LDAPBindPassword.Key, + Path: ldapPasswordPath, + }, + }, + }, + }) + } + + return config +} + +// startupCommand returns an entrypoint that prepares the filesystem for pgAdmin. +func startupCommand() []string { + // pgAdmin reads from the following file by importing its public names. + // Make sure to assign only to variables that begin with underscore U+005F. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/config.py#L669 + // - https://docs.python.org/3/reference/simple_stmts.html#import + // + // DEFAULT_BINARY_PATHS contains the paths to various client tools. The "pg" + // key is for PostgreSQL. Use the latest version found in "/usr" or fallback + // to the default of empty string. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/config.py#L415 + // + // Python 3.6.8 (default, Sep 10 2021, 09:13:53) + // >>> sorted(['']+[]).pop() + // '' + // >>> sorted(['']+['/pg13','/pg10']).pop() + // '/pg13' + // + // Set all remaining variables from the JSON in settingsAbsolutePath. All + // pgAdmin settings are uppercase with underscores, so ignore any keys/names + // that are not. + // + // Lastly, set pgAdmin's LDAP_BIND_PASSWORD setting, if the value was provided + // via Secret. 
As this assignment happens after any values provided via the + // 'Settings' ConfigMap loaded above, this value will overwrite any previous + // configuration of LDAP_BIND_PASSWORD (that is, last write wins). + const configSystem = ` +import glob, json, re, os +DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} +with open('` + settingsAbsolutePath + `') as _f: + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) +if os.path.isfile('` + ldapPasswordAbsolutePath + `'): + with open('` + ldapPasswordAbsolutePath + `') as _f: + LDAP_BIND_PASSWORD = _f.read() +` + + args := []string{strings.TrimLeft(configSystem, "\n")} + + script := strings.Join([]string{ + // Write the system configuration into a read-only file. + `(umask a-w && echo "$1" > ` + configSystemAbsolutePath + `)`, + }, "\n") + + return append([]string{"bash", "-ceu", "--", script, "startup"}, args...) +} + +// systemSettings returns pgAdmin settings as a value that can be marshaled to JSON. +func systemSettings(spec *v1beta1.PGAdminPodSpec) map[string]interface{} { + settings := *spec.Config.Settings.DeepCopy() + if settings == nil { + settings = make(map[string]interface{}) + } + + // SERVER_MODE must always be enabled when running on a webserver. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/config.py#L105 + settings["SERVER_MODE"] = true + + return settings +} diff --git a/internal/pgadmin/config_test.go b/internal/pgadmin/config_test.go new file mode 100644 index 0000000000..87cd7847c2 --- /dev/null +++ b/internal/pgadmin/config_test.go @@ -0,0 +1,119 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgadmin + +import ( + "os" + "os/exec" + "path/filepath" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestPodConfigFiles(t *testing.T) { + configmap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "some-cm"}} + + spec := v1beta1.PGAdminPodSpec{ + Config: v1beta1.PGAdminConfiguration{Files: []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-secret", + }}, + }, { + ConfigMap: &corev1.ConfigMapProjection{LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-cm", + }}, + }}}, + } + + projections := podConfigFiles(configmap, spec) + assert.Assert(t, cmp.MarshalMatches(projections, ` +- secret: + name: test-secret +- configMap: + name: test-cm +- configMap: + items: + - key: pgadmin-settings.json + path: ~postgres-operator/pgadmin.json + name: some-cm + `)) +} + +func TestStartupCommand(t *testing.T) { + assert.Assert(t, cmp.MarshalMatches(startupCommand(), ` +- bash +- -ceu +- -- +- (umask a-w && echo "$1" > /etc/pgadmin/config_system.py) +- startup +- | + import glob, json, re, os + DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} + with open('/etc/pgadmin/conf.d/~postgres-operator/pgadmin.json') as _f: + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) + if 
os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password'): + with open('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password') as _f: + LDAP_BIND_PASSWORD = _f.read() +`)) + + t.Run("ShellCheck", func(t *testing.T) { + command := startupCommand() + shellcheck := require.ShellCheck(t) + + assert.Assert(t, len(command) > 3) + dir := t.TempDir() + file := filepath.Join(dir, "script.bash") + assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) + + // Expect shellcheck to be happy. + cmd := exec.Command(shellcheck, "--enable=all", file) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) + }) + + t.Run("ConfigSystemFlake8", func(t *testing.T) { + command := startupCommand() + flake8 := require.Flake8(t) + + assert.Assert(t, len(command) > 5) + dir := t.TempDir() + file := filepath.Join(dir, "script.py") + assert.NilError(t, os.WriteFile(file, []byte(command[5]), 0o600)) + + // Expect flake8 to be happy. Ignore "E401 multiple imports on one line" + // in addition to the defaults. The file contents appear in PodSpec, so + // allow lines longer than the default to save some vertical space. + cmd := exec.Command(flake8, "--extend-ignore=E401", "--max-line-length=99", file) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) + }) +} + +func TestSystemSettings(t *testing.T) { + spec := new(v1beta1.PGAdminPodSpec) + assert.Assert(t, cmp.MarshalMatches(systemSettings(spec), ` +SERVER_MODE: true + `)) + + spec.Config.Settings = map[string]interface{}{ + "ALLOWED_HOSTS": []interface{}{"225.0.0.0/8", "226.0.0.0/7", "228.0.0.0/6"}, + } + assert.Assert(t, cmp.MarshalMatches(systemSettings(spec), ` +ALLOWED_HOSTS: +- 225.0.0.0/8 +- 226.0.0.0/7 +- 228.0.0.0/6 +SERVER_MODE: true + `)) +} diff --git a/internal/pgadmin/reconcile.go b/internal/pgadmin/reconcile.go new file mode 100644 index 0000000000..af62c482f2 --- /dev/null +++ b/internal/pgadmin/reconcile.go @@ -0,0 +1,301 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgadmin + +import ( + "bytes" + "encoding/json" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// startupScript is the script for the configuration and startup of the pgAdmin service. +// It is based on the start-pgadmin4.sh script from the Crunchy Containers Project. +// Any required functions from common_lib.sh are added as required. +// - https://github.com/CrunchyData/crunchy-containers/blob/master/bin/pgadmin4/start-pgadmin4.sh +// - https://github.com/CrunchyData/crunchy-containers/blob/master/bin/common/common_lib.sh +const startupScript = `CRUNCHY_DIR=${CRUNCHY_DIR:-'/opt/crunchy'} +PGADMIN_DIR=/usr/lib/python3.6/site-packages/pgadmin4-web +APACHE_PIDFILE='/tmp/httpd.pid' +export PATH=$PATH:/usr/pgsql-*/bin + +RED="\033[0;31m" +GREEN="\033[0;32m" +RESET="\033[0m" + +function enable_debugging() { + if [[ ${CRUNCHY_DEBUG:-false} == "true" ]] + then + echo_info "Turning debugging on.." 
+ export PS4='+(${BASH_SOURCE}:${LINENO})> ${FUNCNAME[0]:+${FUNCNAME[0]}(): }' + set -x + fi +} + +function env_check_err() { + if [[ -z ${!1} ]] + then + echo_err "$1 environment variable is not set, aborting." + exit 1 + fi +} + +function echo_info() { + echo -e "${GREEN?}$(date) INFO: ${1?}${RESET?}" +} + +function echo_err() { + echo -e "${RED?}$(date) ERROR: ${1?}${RESET?}" +} + +function err_check { + RC=${1?} + CONTEXT=${2?} + ERROR=${3?} + + if [[ ${RC?} != 0 ]] + then + echo_err "${CONTEXT?}: ${ERROR?}" + exit ${RC?} + fi +} + +function trap_sigterm() { + echo_info "Doing trap logic.." + echo_warn "Clean shutdown of Apache.." + /usr/sbin/httpd -k stop + kill -SIGINT $(head -1 $APACHE_PIDFILE) +} + +enable_debugging +trap 'trap_sigterm' SIGINT SIGTERM + +env_check_err "PGADMIN_SETUP_EMAIL" +env_check_err "PGADMIN_SETUP_PASSWORD" + +if [[ ${ENABLE_TLS:-false} == 'true' ]] +then + echo_info "TLS enabled. Applying https configuration.." + if [[ ( ! -f /certs/server.key ) || ( ! -f /certs/server.crt ) ]] + then + echo_err "ENABLE_TLS true but /certs/server.key or /certs/server.crt not found, aborting" + exit 1 + fi + cp "${CRUNCHY_DIR}/conf/pgadmin-https.conf" /var/lib/pgadmin/pgadmin.conf +else + echo_info "TLS disabled. Applying http configuration.." + cp "${CRUNCHY_DIR}/conf/pgadmin-http.conf" /var/lib/pgadmin/pgadmin.conf +fi + +cp "${CRUNCHY_DIR}/conf/config_local.py" /var/lib/pgadmin/config_local.py + +if [[ -z "${SERVER_PATH}" ]] +then + sed -i "/RedirectMatch/d" /var/lib/pgadmin/pgadmin.conf +fi + +sed -i "s|SERVER_PATH|${SERVER_PATH:-/}|g" /var/lib/pgadmin/pgadmin.conf +sed -i "s|SERVER_PORT|${SERVER_PORT:-5050}|g" /var/lib/pgadmin/pgadmin.conf +sed -i "s/^DEFAULT_SERVER_PORT.*/DEFAULT_SERVER_PORT = ${SERVER_PORT:-5050}/" /var/lib/pgadmin/config_local.py +sed -i "s|\"pg\":.*|\"pg\": \"/usr/pgsql-${PGVERSION?}/bin\",|g" /var/lib/pgadmin/config_local.py + +cd ${PGADMIN_DIR?} + +if [[ ! -f /var/lib/pgadmin/pgadmin4.db ]] +then + echo_info "Setting up pgAdmin4 database.." + python3 setup.py > /tmp/pgadmin4.stdout 2> /tmp/pgadmin4.stderr + err_check "$?" "pgAdmin4 Database Setup" "Could not create pgAdmin4 database: \n$(cat /tmp/pgadmin4.stderr)" +fi + +echo_info "Starting Apache web server.." +/usr/sbin/httpd -D FOREGROUND & +echo $! > $APACHE_PIDFILE + +wait` + +// ConfigMap populates a ConfigMap with the configuration needed to run pgAdmin. +func ConfigMap( + inCluster *v1beta1.PostgresCluster, + outConfigMap *corev1.ConfigMap, +) error { + if inCluster.Spec.UserInterface == nil || inCluster.Spec.UserInterface.PGAdmin == nil { + // pgAdmin is disabled; there is nothing to do. + return nil + } + + initialize.Map(&outConfigMap.Data) + + // To avoid spurious reconciles, the following value must not change when + // the spec does not change. [json.Encoder] and [json.Marshal] do this by + // emitting map keys in sorted order. Indent so the value is not rendered + // as one long line by `kubectl`. + buffer := new(bytes.Buffer) + encoder := json.NewEncoder(buffer) + encoder.SetEscapeHTML(false) + encoder.SetIndent("", " ") + err := encoder.Encode(systemSettings(inCluster.Spec.UserInterface.PGAdmin)) + if err == nil { + outConfigMap.Data[settingsConfigMapKey] = buffer.String() + } + return err +} + +// Pod populates a PodSpec with the container and volumes needed to run pgAdmin. 
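The ConfigMap function above leans on two properties of encoding/json to avoid spurious reconciles: map keys are emitted in sorted order, so identical settings always serialize to identical bytes, and indentation keeps the value readable in `kubectl` output. A standalone sketch of that encoding, using an illustrative settings map rather than the real spec type.

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
    )

    func main() {
        settings := map[string]interface{}{
            "SERVER_MODE":   true,
            "ALLOWED_HOSTS": []string{"225.0.0.0/8"},
        }

        buffer := new(bytes.Buffer)
        encoder := json.NewEncoder(buffer)
        encoder.SetEscapeHTML(false) // keep characters like '&' unescaped
        encoder.SetIndent("", "  ")  // render across lines for `kubectl` output
        if err := encoder.Encode(settings); err != nil {
            panic(err)
        }

        // Keys come out sorted, so the output is stable across calls.
        fmt.Print(buffer.String())
    }
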
+func Pod( + inCluster *v1beta1.PostgresCluster, + inConfigMap *corev1.ConfigMap, + outPod *corev1.PodSpec, pgAdminVolume *corev1.PersistentVolumeClaim, +) { + if inCluster.Spec.UserInterface == nil || inCluster.Spec.UserInterface.PGAdmin == nil { + // pgAdmin is disabled; there is nothing to do. + return + } + + // create the pgAdmin Pod volumes + tmp := corev1.Volume{Name: tmpVolume} + tmp.EmptyDir = &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + } + + pgAdminLog := corev1.Volume{Name: logVolume} + pgAdminLog.EmptyDir = &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + } + + pgAdminData := corev1.Volume{Name: dataVolume} + pgAdminData.VolumeSource = corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pgAdminVolume.Name, + ReadOnly: false, + }, + } + + configVolumeMount := corev1.VolumeMount{ + Name: "pgadmin-config", MountPath: configMountPath, ReadOnly: true, + } + configVolume := corev1.Volume{Name: configVolumeMount.Name} + configVolume.Projected = &corev1.ProjectedVolumeSource{ + Sources: podConfigFiles(inConfigMap, *inCluster.Spec.UserInterface.PGAdmin), + } + + startupVolumeMount := corev1.VolumeMount{ + Name: "pgadmin-startup", MountPath: startupMountPath, ReadOnly: true, + } + startupVolume := corev1.Volume{Name: startupVolumeMount.Name} + startupVolume.EmptyDir = &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + + // When this volume is too small, the Pod will be evicted and recreated + // by the StatefulSet controller. + // - https://kubernetes.io/docs/concepts/storage/volumes/#emptydir + // NOTE: tmpfs blocks are PAGE_SIZE, usually 4KiB, and size rounds up. + SizeLimit: resource.NewQuantity(32<<10, resource.BinarySI), + } + + // pgadmin container + container := corev1.Container{ + Name: naming.ContainerPGAdmin, + Env: []corev1.EnvVar{ + { + Name: "PGADMIN_SETUP_EMAIL", + Value: loginEmail, + }, + { + Name: "PGADMIN_SETUP_PASSWORD", + Value: loginPassword, + }, + // Setting the KRB5_CONFIG for kerberos + // - https://web.mit.edu/kerberos/krb5-current/doc/admin/conf_files/krb5_conf.html + { + Name: "KRB5_CONFIG", + Value: configMountPath + "/krb5.conf", + }, + // In testing it was determined that we need to set this env var for the replay cache + // otherwise it defaults to the read-only location `/var/tmp/` + // - https://web.mit.edu/kerberos/krb5-current/doc/basic/rcache_def.html#replay-cache-types + { + Name: "KRB5RCACHEDIR", + Value: "/tmp", + }, + }, + Command: []string{"bash", "-c", startupScript}, + Image: config.PGAdminContainerImage(inCluster), + ImagePullPolicy: inCluster.Spec.ImagePullPolicy, + Resources: inCluster.Spec.UserInterface.PGAdmin.Resources, + + SecurityContext: initialize.RestrictedSecurityContext(), + + Ports: []corev1.ContainerPort{{ + Name: naming.PortPGAdmin, + ContainerPort: int32(pgAdminPort), + Protocol: corev1.ProtocolTCP, + }}, + VolumeMounts: []corev1.VolumeMount{ + startupVolumeMount, + configVolumeMount, + { + Name: tmpVolume, + MountPath: runMountPath, + }, + { + Name: logVolume, + MountPath: logMountPath, + }, + { + Name: dataVolume, + MountPath: dataMountPath, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(pgAdminPort), + }, + }, + InitialDelaySeconds: 20, + PeriodSeconds: 10, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(pgAdminPort), + }, + }, + 
InitialDelaySeconds: 15, + PeriodSeconds: 20, + }, + } + + startup := corev1.Container{ + Name: naming.ContainerPGAdminStartup, + Command: startupCommand(), + + Image: container.Image, + ImagePullPolicy: container.ImagePullPolicy, + Resources: container.Resources, + SecurityContext: initialize.RestrictedSecurityContext(), + VolumeMounts: []corev1.VolumeMount{ + startupVolumeMount, + configVolumeMount, + }, + } + + // The startup container is the only one allowed to write to the startup volume. + startup.VolumeMounts[0].ReadOnly = false + + outPod.InitContainers = []corev1.Container{startup} + // add all volumes other than 'tmp' as that is added later + outPod.Volumes = []corev1.Volume{pgAdminLog, pgAdminData, configVolume, startupVolume} + + outPod.Containers = []corev1.Container{container} +} diff --git a/internal/pgadmin/reconcile_test.go b/internal/pgadmin/reconcile_test.go new file mode 100644 index 0000000000..f91a9b807f --- /dev/null +++ b/internal/pgadmin/reconcile_test.go @@ -0,0 +1,551 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgadmin + +import ( + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestConfigMap(t *testing.T) { + t.Parallel() + + cluster := new(v1beta1.PostgresCluster) + config := new(corev1.ConfigMap) + + t.Run("Disabled", func(t *testing.T) { + before := config.DeepCopy() + assert.NilError(t, ConfigMap(cluster, config)) + + // No change when pgAdmin is not requested in the spec. + assert.DeepEqual(t, before, config) + }) + + t.Run("Defaults", func(t *testing.T) { + cluster.Spec.UserInterface = new(v1beta1.UserInterfaceSpec) + cluster.Spec.UserInterface.PGAdmin = new(v1beta1.PGAdminPodSpec) + cluster.Default() + + assert.NilError(t, ConfigMap(cluster, config)) + + assert.Assert(t, cmp.MarshalMatches(config.Data, ` +pgadmin-settings.json: | + { + "SERVER_MODE": true + } + `)) + }) + + t.Run("Customizations", func(t *testing.T) { + cluster.Spec.UserInterface = new(v1beta1.UserInterfaceSpec) + cluster.Spec.UserInterface.PGAdmin = new(v1beta1.PGAdminPodSpec) + cluster.Spec.UserInterface.PGAdmin.Config.Settings = map[string]interface{}{ + "some": "thing", + "UPPER_CASE": false, + } + cluster.Default() + + assert.NilError(t, ConfigMap(cluster, config)) + + assert.Assert(t, cmp.MarshalMatches(config.Data, ` +pgadmin-settings.json: | + { + "SERVER_MODE": true, + "UPPER_CASE": false, + "some": "thing" + } + `)) + }) +} + +func TestPod(t *testing.T) { + t.Parallel() + + cluster := new(v1beta1.PostgresCluster) + config := new(corev1.ConfigMap) + pod := new(corev1.PodSpec) + pvc := new(corev1.PersistentVolumeClaim) + + call := func() { Pod(cluster, config, pod, pvc) } + + t.Run("Disabled", func(t *testing.T) { + before := pod.DeepCopy() + call() + + // No change when pgAdmin is not requested in the spec. 
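The Pod function above backs the startup volume with a memory-medium emptyDir and a small SizeLimit; as the in-code note explains, tmpfs allocates whole pages (usually 4 KiB) and a Pod that outgrows the limit is evicted and recreated by the StatefulSet controller. A minimal, standalone sketch of such a volume for illustration only.

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        startup := corev1.Volume{Name: "pgadmin-startup"}
        startup.EmptyDir = &corev1.EmptyDirVolumeSource{
            Medium: corev1.StorageMediumMemory,

            // 32 KiB; tmpfs rounds the usage up to whole pages.
            SizeLimit: resource.NewQuantity(32<<10, resource.BinarySI),
        }

        fmt.Println(startup.Name, startup.EmptyDir.SizeLimit.String()) // pgadmin-startup 32Ki
    }
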
+ assert.DeepEqual(t, before, pod) + }) + + t.Run("Defaults", func(t *testing.T) { + cluster.Spec.UserInterface = new(v1beta1.UserInterfaceSpec) + cluster.Spec.UserInterface.PGAdmin = new(v1beta1.PGAdminPodSpec) + cluster.Default() + + call() + + assert.Assert(t, cmp.MarshalMatches(pod, ` +containers: +- command: + - bash + - -c + - |- + CRUNCHY_DIR=${CRUNCHY_DIR:-'/opt/crunchy'} + PGADMIN_DIR=/usr/lib/python3.6/site-packages/pgadmin4-web + APACHE_PIDFILE='/tmp/httpd.pid' + export PATH=$PATH:/usr/pgsql-*/bin + + RED="\033[0;31m" + GREEN="\033[0;32m" + RESET="\033[0m" + + function enable_debugging() { + if [[ ${CRUNCHY_DEBUG:-false} == "true" ]] + then + echo_info "Turning debugging on.." + export PS4='+(${BASH_SOURCE}:${LINENO})> ${FUNCNAME[0]:+${FUNCNAME[0]}(): }' + set -x + fi + } + + function env_check_err() { + if [[ -z ${!1} ]] + then + echo_err "$1 environment variable is not set, aborting." + exit 1 + fi + } + + function echo_info() { + echo -e "${GREEN?}$(date) INFO: ${1?}${RESET?}" + } + + function echo_err() { + echo -e "${RED?}$(date) ERROR: ${1?}${RESET?}" + } + + function err_check { + RC=${1?} + CONTEXT=${2?} + ERROR=${3?} + + if [[ ${RC?} != 0 ]] + then + echo_err "${CONTEXT?}: ${ERROR?}" + exit ${RC?} + fi + } + + function trap_sigterm() { + echo_info "Doing trap logic.." + echo_warn "Clean shutdown of Apache.." + /usr/sbin/httpd -k stop + kill -SIGINT $(head -1 $APACHE_PIDFILE) + } + + enable_debugging + trap 'trap_sigterm' SIGINT SIGTERM + + env_check_err "PGADMIN_SETUP_EMAIL" + env_check_err "PGADMIN_SETUP_PASSWORD" + + if [[ ${ENABLE_TLS:-false} == 'true' ]] + then + echo_info "TLS enabled. Applying https configuration.." + if [[ ( ! -f /certs/server.key ) || ( ! -f /certs/server.crt ) ]] + then + echo_err "ENABLE_TLS true but /certs/server.key or /certs/server.crt not found, aborting" + exit 1 + fi + cp "${CRUNCHY_DIR}/conf/pgadmin-https.conf" /var/lib/pgadmin/pgadmin.conf + else + echo_info "TLS disabled. Applying http configuration.." + cp "${CRUNCHY_DIR}/conf/pgadmin-http.conf" /var/lib/pgadmin/pgadmin.conf + fi + + cp "${CRUNCHY_DIR}/conf/config_local.py" /var/lib/pgadmin/config_local.py + + if [[ -z "${SERVER_PATH}" ]] + then + sed -i "/RedirectMatch/d" /var/lib/pgadmin/pgadmin.conf + fi + + sed -i "s|SERVER_PATH|${SERVER_PATH:-/}|g" /var/lib/pgadmin/pgadmin.conf + sed -i "s|SERVER_PORT|${SERVER_PORT:-5050}|g" /var/lib/pgadmin/pgadmin.conf + sed -i "s/^DEFAULT_SERVER_PORT.*/DEFAULT_SERVER_PORT = ${SERVER_PORT:-5050}/" /var/lib/pgadmin/config_local.py + sed -i "s|\"pg\":.*|\"pg\": \"/usr/pgsql-${PGVERSION?}/bin\",|g" /var/lib/pgadmin/config_local.py + + cd ${PGADMIN_DIR?} + + if [[ ! -f /var/lib/pgadmin/pgadmin4.db ]] + then + echo_info "Setting up pgAdmin4 database.." + python3 setup.py > /tmp/pgadmin4.stdout 2> /tmp/pgadmin4.stderr + err_check "$?" "pgAdmin4 Database Setup" "Could not create pgAdmin4 database: \n$(cat /tmp/pgadmin4.stderr)" + fi + + echo_info "Starting Apache web server.." + /usr/sbin/httpd -D FOREGROUND & + echo $! 
> $APACHE_PIDFILE + + wait + env: + - name: PGADMIN_SETUP_EMAIL + value: admin + - name: PGADMIN_SETUP_PASSWORD + value: admin + - name: KRB5_CONFIG + value: /etc/pgadmin/conf.d/krb5.conf + - name: KRB5RCACHEDIR + value: /tmp + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 20 + tcpSocket: + port: 5050 + name: pgadmin + ports: + - containerPort: 5050 + name: pgadmin + protocol: TCP + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 10 + tcpSocket: + port: 5050 + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgadmin + name: pgadmin-startup + readOnly: true + - mountPath: /etc/pgadmin/conf.d + name: pgadmin-config + readOnly: true + - mountPath: /etc/httpd/run + name: tmp + - mountPath: /var/log/pgadmin + name: pgadmin-log + - mountPath: /var/lib/pgadmin + name: pgadmin-data +initContainers: +- command: + - bash + - -ceu + - -- + - (umask a-w && echo "$1" > /etc/pgadmin/config_system.py) + - startup + - | + import glob, json, re, os + DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} + with open('/etc/pgadmin/conf.d/~postgres-operator/pgadmin.json') as _f: + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) + if os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password'): + with open('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password') as _f: + LDAP_BIND_PASSWORD = _f.read() + name: pgadmin-startup + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgadmin + name: pgadmin-startup + - mountPath: /etc/pgadmin/conf.d + name: pgadmin-config + readOnly: true +volumes: +- emptyDir: + medium: Memory + name: pgadmin-log +- name: pgadmin-data + persistentVolumeClaim: + claimName: "" +- name: pgadmin-config + projected: + sources: + - configMap: + items: + - key: pgadmin-settings.json + path: ~postgres-operator/pgadmin.json +- emptyDir: + medium: Memory + sizeLimit: 32Ki + name: pgadmin-startup + `)) + + // No change when called again. 
+ before := pod.DeepCopy() + call() + assert.DeepEqual(t, before, pod) + }) + + t.Run("Customizations", func(t *testing.T) { + cluster.Spec.ImagePullPolicy = corev1.PullAlways + cluster.Spec.UserInterface.PGAdmin.Image = "new-image" + cluster.Spec.UserInterface.PGAdmin.Resources.Requests = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + } + cluster.Spec.UserInterface.PGAdmin.Config.Files = []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{LocalObjectReference: corev1.LocalObjectReference{ + Name: "test", + }}, + }} + cluster.Spec.UserInterface.PGAdmin.Config.LDAPBindPassword = &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podtest", + }, + Key: "podtestpw", + } + + call() + + assert.Assert(t, cmp.MarshalMatches(pod, ` +containers: +- command: + - bash + - -c + - |- + CRUNCHY_DIR=${CRUNCHY_DIR:-'/opt/crunchy'} + PGADMIN_DIR=/usr/lib/python3.6/site-packages/pgadmin4-web + APACHE_PIDFILE='/tmp/httpd.pid' + export PATH=$PATH:/usr/pgsql-*/bin + + RED="\033[0;31m" + GREEN="\033[0;32m" + RESET="\033[0m" + + function enable_debugging() { + if [[ ${CRUNCHY_DEBUG:-false} == "true" ]] + then + echo_info "Turning debugging on.." + export PS4='+(${BASH_SOURCE}:${LINENO})> ${FUNCNAME[0]:+${FUNCNAME[0]}(): }' + set -x + fi + } + + function env_check_err() { + if [[ -z ${!1} ]] + then + echo_err "$1 environment variable is not set, aborting." + exit 1 + fi + } + + function echo_info() { + echo -e "${GREEN?}$(date) INFO: ${1?}${RESET?}" + } + + function echo_err() { + echo -e "${RED?}$(date) ERROR: ${1?}${RESET?}" + } + + function err_check { + RC=${1?} + CONTEXT=${2?} + ERROR=${3?} + + if [[ ${RC?} != 0 ]] + then + echo_err "${CONTEXT?}: ${ERROR?}" + exit ${RC?} + fi + } + + function trap_sigterm() { + echo_info "Doing trap logic.." + echo_warn "Clean shutdown of Apache.." + /usr/sbin/httpd -k stop + kill -SIGINT $(head -1 $APACHE_PIDFILE) + } + + enable_debugging + trap 'trap_sigterm' SIGINT SIGTERM + + env_check_err "PGADMIN_SETUP_EMAIL" + env_check_err "PGADMIN_SETUP_PASSWORD" + + if [[ ${ENABLE_TLS:-false} == 'true' ]] + then + echo_info "TLS enabled. Applying https configuration.." + if [[ ( ! -f /certs/server.key ) || ( ! -f /certs/server.crt ) ]] + then + echo_err "ENABLE_TLS true but /certs/server.key or /certs/server.crt not found, aborting" + exit 1 + fi + cp "${CRUNCHY_DIR}/conf/pgadmin-https.conf" /var/lib/pgadmin/pgadmin.conf + else + echo_info "TLS disabled. Applying http configuration.." + cp "${CRUNCHY_DIR}/conf/pgadmin-http.conf" /var/lib/pgadmin/pgadmin.conf + fi + + cp "${CRUNCHY_DIR}/conf/config_local.py" /var/lib/pgadmin/config_local.py + + if [[ -z "${SERVER_PATH}" ]] + then + sed -i "/RedirectMatch/d" /var/lib/pgadmin/pgadmin.conf + fi + + sed -i "s|SERVER_PATH|${SERVER_PATH:-/}|g" /var/lib/pgadmin/pgadmin.conf + sed -i "s|SERVER_PORT|${SERVER_PORT:-5050}|g" /var/lib/pgadmin/pgadmin.conf + sed -i "s/^DEFAULT_SERVER_PORT.*/DEFAULT_SERVER_PORT = ${SERVER_PORT:-5050}/" /var/lib/pgadmin/config_local.py + sed -i "s|\"pg\":.*|\"pg\": \"/usr/pgsql-${PGVERSION?}/bin\",|g" /var/lib/pgadmin/config_local.py + + cd ${PGADMIN_DIR?} + + if [[ ! -f /var/lib/pgadmin/pgadmin4.db ]] + then + echo_info "Setting up pgAdmin4 database.." + python3 setup.py > /tmp/pgadmin4.stdout 2> /tmp/pgadmin4.stderr + err_check "$?" "pgAdmin4 Database Setup" "Could not create pgAdmin4 database: \n$(cat /tmp/pgadmin4.stderr)" + fi + + echo_info "Starting Apache web server.." + /usr/sbin/httpd -D FOREGROUND & + echo $! 
> $APACHE_PIDFILE + + wait + env: + - name: PGADMIN_SETUP_EMAIL + value: admin + - name: PGADMIN_SETUP_PASSWORD + value: admin + - name: KRB5_CONFIG + value: /etc/pgadmin/conf.d/krb5.conf + - name: KRB5RCACHEDIR + value: /tmp + image: new-image + imagePullPolicy: Always + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 20 + tcpSocket: + port: 5050 + name: pgadmin + ports: + - containerPort: 5050 + name: pgadmin + protocol: TCP + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 10 + tcpSocket: + port: 5050 + resources: + requests: + cpu: 100m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgadmin + name: pgadmin-startup + readOnly: true + - mountPath: /etc/pgadmin/conf.d + name: pgadmin-config + readOnly: true + - mountPath: /etc/httpd/run + name: tmp + - mountPath: /var/log/pgadmin + name: pgadmin-log + - mountPath: /var/lib/pgadmin + name: pgadmin-data +initContainers: +- command: + - bash + - -ceu + - -- + - (umask a-w && echo "$1" > /etc/pgadmin/config_system.py) + - startup + - | + import glob, json, re, os + DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} + with open('/etc/pgadmin/conf.d/~postgres-operator/pgadmin.json') as _f: + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) + if os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password'): + with open('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password') as _f: + LDAP_BIND_PASSWORD = _f.read() + image: new-image + imagePullPolicy: Always + name: pgadmin-startup + resources: + requests: + cpu: 100m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgadmin + name: pgadmin-startup + - mountPath: /etc/pgadmin/conf.d + name: pgadmin-config + readOnly: true +volumes: +- emptyDir: + medium: Memory + name: pgadmin-log +- name: pgadmin-data + persistentVolumeClaim: + claimName: "" +- name: pgadmin-config + projected: + sources: + - secret: + name: test + - configMap: + items: + - key: pgadmin-settings.json + path: ~postgres-operator/pgadmin.json + - secret: + items: + - key: podtestpw + path: ~postgres-operator/ldap-bind-password + name: podtest +- emptyDir: + medium: Memory + sizeLimit: 32Ki + name: pgadmin-startup + `)) + }) +} diff --git a/internal/pgadmin/users.go b/internal/pgadmin/users.go new file mode 100644 index 0000000000..7ce69ce211 --- /dev/null +++ b/internal/pgadmin/users.go @@ -0,0 +1,258 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgadmin + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "strings" + + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +type Executor func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, +) error + +// WriteUsersInPGAdmin uses exec and "python" to create users in pgAdmin and +// update their passwords when they already exist. 
A blank password for a user +// blocks that user from logging in to pgAdmin. The pgAdmin configuration +// database must exist before calling this. +func WriteUsersInPGAdmin( + ctx context.Context, cluster *v1beta1.PostgresCluster, exec Executor, + users []v1beta1.PostgresUserSpec, passwords map[string]string, +) error { + primary := naming.ClusterPrimaryService(cluster) + + args := []string{ + cluster.Name, + primary.Name + "." + primary.Namespace + ".svc", + fmt.Sprint(*cluster.Spec.Port), + } + script := strings.Join([]string{ + // Unpack arguments into an object. + // - https://docs.python.org/3/library/types.html#types.SimpleNamespace + ` +import sys +import types + +cluster = types.SimpleNamespace() +(cluster.name, cluster.hostname, cluster.port) = sys.argv[1:]`, + + // The location of pgAdmin files can vary by container image. Look for + // typical names in the module search path: the PyPI package is named + // "pgadmin4" while custom builds might use "pgadmin4-web". The pgAdmin + // packages expect to find themselves on the search path, so prepend + // that directory there (like pgAdmin does in its WSGI entrypoint). + // - https://pypi.org/project/pgadmin4/ + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/pgAdmin4.wsgi#L18 + ` +import importlib.util +import os +import sys + +spec = importlib.util.find_spec('.pgadmin', ( + importlib.util.find_spec('pgadmin4') or + importlib.util.find_spec('pgadmin4-web') +).name) +root = os.path.dirname(spec.submodule_search_locations[0]) +if sys.path[0] != root: + sys.path.insert(0, root)`, + + // Import pgAdmin modules now that they are on the search path. + // NOTE: When testing with the REPL, use the `__enter__` method to + // avoid one level of indentation. + // + // create_app().app_context().__enter__() + // + ` +import copy +import json +import sys + +from pgadmin import create_app +from pgadmin.model import db, Role, User, Server, ServerGroup +from pgadmin.utils.constants import INTERNAL +from pgadmin.utils.crypto import encrypt + +with create_app().app_context():`, + + // The user with id=1 is automatically created by pgAdmin when it + // creates its configuration database. Clear that email and username + // so they cannot conflict with users we create, and deactivate the user + // so it cannot log in. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/migrations/versions/fdc58d9bd449_.py#L129 + ` + admin = db.session.query(User).filter_by(id=1).first() + admin.active = False + admin.email = '' + admin.password = '' + admin.username = '' + + db.session.add(admin) + db.session.commit()`, + + // Process each line of input as a single user definition. Those with + // a non-blank password are allowed to login. + // + // The "internal" authentication source requires that username and email + // be the same and be an email address. Append "@pgo" to the username + // to pass login validation. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/pgadmin/authenticate/internal.py#L88 + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/pgadmin/utils/validation_utils.py#L13 + // + // The "auth_source" and "username" attributes are part of the User + // model since pgAdmin v4.21. 
+ // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/pgadmin/model/__init__.py#L66 + ` + for line in sys.stdin: + if not line.strip(): + continue + + data = json.loads(line) + address = data['username'] + '@pgo' + user = ( + db.session.query(User).filter_by(username=address).first() or + User() + ) + user.auth_source = INTERNAL + user.email = user.username = address + user.password = data['password'] + user.active = bool(user.password) + user.roles = db.session.query(Role).filter_by(name='User').all()`, + + // After a user logs in, pgAdmin checks that the "master password" is + // set. It does not seem to use the value nor check that it is valid. + // We set it to "any" to satisfy the check. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/pgadmin/browser/__init__.py#L963 + // + // The "verify_and_update_password" method hashes the plaintext password + // according to pgAdmin security settings. It is part of the User model + // since pgAdmin v4.19 and Flask-Security-Too v3.20. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/requirements.txt#L40 + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/pgadmin/model/__init__.py#L66 + // - https://flask-security-too.readthedocs.io/en/stable/api.html#flask_security.UserMixin.verify_and_update_password + ` + if user.password: + user.masterpass_check = 'any' + user.verify_and_update_password(user.password)`, + + // Write the user to get its generated identity. + ` + db.session.add(user) + db.session.commit()`, + + // One server group and connection are configured for each user, similar + // to the way they are made using their respective dialog windows. + // - https://www.pgadmin.org/docs/pgadmin4/latest/server_group_dialog.html + // - https://www.pgadmin.org/docs/pgadmin4/latest/server_dialog.html + // + // We use a similar method to the import method when creating server connections + // - https://www.pgadmin.org/docs/pgadmin4/latest/import_export_servers.html + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/setup.py#L294 + ` + group = ( + db.session.query(ServerGroup).filter_by( + user_id=user.id, + ).order_by("id").first() or + ServerGroup() + ) + group.name = "Crunchy PostgreSQL Operator" + group.user_id = user.id + db.session.add(group) + db.session.commit()`, + + // The name of the server connection is the same as the cluster name. + // Note that the server connections are created when the users are created or + // modified. Changes to a server connection will generally persist until a + // change is made to the corresponding user. For custom server connections, + // a new server should be created with a unique name. + ` + server = ( + db.session.query(Server).filter_by( + servergroup_id=group.id, + user_id=user.id, + name=cluster.name, + ).first() or + Server() + ) + + server.name = cluster.name + server.host = cluster.hostname + server.port = cluster.port + server.servergroup_id = group.id + server.user_id = user.id + server.maintenance_db = "postgres" + server.ssl_mode = "prefer"`, + + // Encrypt the Server password with the User's plaintext password. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/pgadmin/__init__.py#L601 + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/pgadmin/utils/master_password.py#L21 + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/pgadmin/browser/server_groups/servers/__init__.py#L1091 + // + // The "save_password" attribute is part of the Server model since + // pgAdmin v4.21. 
+ // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/pgadmin/model/__init__.py#L108 + ` + server.username = data['username'] + server.password = encrypt(data['password'], data['password']) + server.save_password = int(bool(data['password']))`, + + // Due to limitations on the types of updates that can be made to active + // server connections, when the current server connection is updated, we + // need to delete it and add a new server connection in its place. This + // will require a refresh if pgAdmin web GUI is being used when the + // update takes place. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-4_30/web/pgadmin/browser/server_groups/servers/__init__.py#L772 + // + // TODO(cbandy): We could possibly get the same effect by invalidating + // the user's sessions in pgAdmin v5.4 with Flask-Security-Too v4. + // - https://github.com/pgadmin-org/pgadmin4/blob/REL-5_4/web/pgadmin/model/__init__.py#L67 + // - https://flask-security-too.readthedocs.io/en/stable/api.html#flask_security.UserDatastore.set_uniquifier + ` + if server.id and db.session.is_modified(server): + old = copy.deepcopy(server) + db.make_transient(server) + server.id = None + db.session.delete(old) + + db.session.add(server) + db.session.commit()`, + }, "\n") + "\n" + + var err error + var stdin, stdout, stderr bytes.Buffer + + encoder := json.NewEncoder(&stdin) + encoder.SetEscapeHTML(false) + + for i := range users { + spec := users[i] + + if err == nil { + err = encoder.Encode(map[string]interface{}{ + "username": spec.Name, + "password": passwords[string(spec.Name)], + }) + } + } + + if err == nil { + err = exec(ctx, &stdin, &stdout, &stderr, + append([]string{"python", "-c", script}, args...)...) + + log := logging.FromContext(ctx) + log.V(1).Info("wrote pgAdmin users", + "stdout", stdout.String(), + "stderr", stderr.String()) + } + + return err +} diff --git a/internal/pgadmin/users_test.go b/internal/pgadmin/users_test.go new file mode 100644 index 0000000000..69619667af --- /dev/null +++ b/internal/pgadmin/users_test.go @@ -0,0 +1,255 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package pgadmin + +import ( + "context" + "errors" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + + "gotest.tools/v3/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestWriteUsersInPGAdmin(t *testing.T) { + ctx := context.Background() + cluster := &v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testcluster", + Namespace: "testnamespace", + }, + Spec: v1beta1.PostgresClusterSpec{ + Port: initialize.Int32(5432), + }, + } + + t.Run("Arguments", func(t *testing.T) { + expected := errors.New("pass-through") + exec := func( + _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + assert.Assert(t, stdin != nil, "should send stdin") + assert.Assert(t, stdout != nil, "should capture stdout") + assert.Assert(t, stderr != nil, "should capture stderr") + + assert.Check(t, !strings.ContainsRune(strings.Join(command, ""), '\t'), + "Python should not be indented with tabs") + + assert.DeepEqual(t, command, []string{"python", "-c", ` +import sys +import types + +cluster = types.SimpleNamespace() +(cluster.name, cluster.hostname, cluster.port) = sys.argv[1:] + +import importlib.util +import os +import sys + +spec = importlib.util.find_spec('.pgadmin', ( + importlib.util.find_spec('pgadmin4') or + importlib.util.find_spec('pgadmin4-web') +).name) +root = os.path.dirname(spec.submodule_search_locations[0]) +if sys.path[0] != root: + sys.path.insert(0, root) + +import copy +import json +import sys + +from pgadmin import create_app +from pgadmin.model import db, Role, User, Server, ServerGroup +from pgadmin.utils.constants import INTERNAL +from pgadmin.utils.crypto import encrypt + +with create_app().app_context(): + + admin = db.session.query(User).filter_by(id=1).first() + admin.active = False + admin.email = '' + admin.password = '' + admin.username = '' + + db.session.add(admin) + db.session.commit() + + for line in sys.stdin: + if not line.strip(): + continue + + data = json.loads(line) + address = data['username'] + '@pgo' + user = ( + db.session.query(User).filter_by(username=address).first() or + User() + ) + user.auth_source = INTERNAL + user.email = user.username = address + user.password = data['password'] + user.active = bool(user.password) + user.roles = db.session.query(Role).filter_by(name='User').all() + + if user.password: + user.masterpass_check = 'any' + user.verify_and_update_password(user.password) + + db.session.add(user) + db.session.commit() + + group = ( + db.session.query(ServerGroup).filter_by( + user_id=user.id, + ).order_by("id").first() or + ServerGroup() + ) + group.name = "Crunchy PostgreSQL Operator" + group.user_id = user.id + db.session.add(group) + db.session.commit() + + server = ( + db.session.query(Server).filter_by( + servergroup_id=group.id, + user_id=user.id, + name=cluster.name, + ).first() or + Server() + ) + + server.name = cluster.name + server.host = cluster.hostname + server.port = cluster.port + server.servergroup_id = group.id + server.user_id = user.id + server.maintenance_db = "postgres" + server.ssl_mode = "prefer" + + server.username = data['username'] + server.password = encrypt(data['password'], data['password']) + server.save_password = int(bool(data['password'])) + + if 
server.id and db.session.is_modified(server): + old = copy.deepcopy(server) + db.make_transient(server) + server.id = None + db.session.delete(old) + + db.session.add(server) + db.session.commit() +`, + "testcluster", + "testcluster-primary.testnamespace.svc", + "5432", + }) + return expected + } + + assert.Equal(t, expected, WriteUsersInPGAdmin(ctx, cluster, exec, nil, nil)) + }) + + t.Run("Flake8", func(t *testing.T) { + flake8 := require.Flake8(t) + + called := false + exec := func( + _ context.Context, _ io.Reader, _, _ io.Writer, command ...string, + ) error { + called = true + + // Expect a python command with an inline script. + assert.DeepEqual(t, command[:2], []string{"python", "-c"}) + assert.Assert(t, len(command) > 2) + script := command[2] + + // Write out that inline script. + dir := t.TempDir() + file := filepath.Join(dir, "script.py") + assert.NilError(t, os.WriteFile(file, []byte(script), 0o600)) + + // Expect flake8 to be happy. Ignore "E402 module level import not + // at top of file" in addition to the defaults. + cmd := exec.Command(flake8, "--extend-ignore=E402", file) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) + + return nil + } + + _ = WriteUsersInPGAdmin(ctx, cluster, exec, nil, nil) + assert.Assert(t, called) + }) + + t.Run("Empty", func(t *testing.T) { + calls := 0 + exec := func( + _ context.Context, stdin io.Reader, _, _ io.Writer, _ ...string, + ) error { + calls++ + + b, err := io.ReadAll(stdin) + assert.NilError(t, err) + assert.Assert(t, len(b) == 0, "expected no stdin, got %q", string(b)) + return nil + } + + assert.NilError(t, WriteUsersInPGAdmin(ctx, cluster, exec, nil, nil)) + assert.Equal(t, calls, 1) + + assert.NilError(t, WriteUsersInPGAdmin(ctx, cluster, exec, []v1beta1.PostgresUserSpec{}, nil)) + assert.Equal(t, calls, 2) + + assert.NilError(t, WriteUsersInPGAdmin(ctx, cluster, exec, nil, map[string]string{})) + assert.Equal(t, calls, 3) + }) + + t.Run("Passwords", func(t *testing.T) { + calls := 0 + exec := func( + _ context.Context, stdin io.Reader, _, _ io.Writer, _ ...string, + ) error { + calls++ + + b, err := io.ReadAll(stdin) + assert.NilError(t, err) + assert.DeepEqual(t, string(b), strings.TrimLeft(` +{"password":"","username":"user-no-options"} +{"password":"","username":"user-no-databases"} +{"password":"some$pass!word","username":"user-with-password"} +`, "\n")) + return nil + } + + assert.NilError(t, WriteUsersInPGAdmin(ctx, cluster, exec, + []v1beta1.PostgresUserSpec{ + { + Name: "user-no-options", + Databases: []v1beta1.PostgresIdentifier{"db1"}, + }, + { + Name: "user-no-databases", + Options: "some options here", + }, + { + Name: "user-with-password", + }, + }, + map[string]string{ + "no-user": "ignored", + "user-with-password": "some$pass!word", + }, + )) + assert.Equal(t, calls, 1) + }) +} diff --git a/internal/pgaudit/postgres.go b/internal/pgaudit/postgres.go new file mode 100644 index 0000000000..07867d020e --- /dev/null +++ b/internal/pgaudit/postgres.go @@ -0,0 +1,59 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgaudit + +import ( + "context" + + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/postgres" +) + +// When the pgAudit shared library is not loaded, the extension cannot be +// installed. The "CREATE EXTENSION" command fails with an error, "pgaudit must +// be loaded…". 
+// +// When the pgAudit shared library is loaded but the extension is not installed, +// AUDIT messages are logged according to the various levels and settings +// (including both SESSION and OBJECT events) but the messages contain fewer +// details than normal. DDL messages, for example, lack the affected object name +// and type. +// +// When the pgAudit extension is installed but the shared library is not loaded, +// 1. No AUDIT messages are logged. +// 2. DDL commands fail with error "pgaudit must be loaded…". +// 3. DML commands and SELECT queries succeed and return results. +// 4. Databases can be created and dropped. +// 5. Roles and privileges can be created, dropped, granted, and revoked, but +// the "DROP OWNED" command fails. + +// EnableInPostgreSQL installs pgAudit triggers into every database. +func EnableInPostgreSQL(ctx context.Context, exec postgres.Executor) error { + log := logging.FromContext(ctx) + + stdout, stderr, err := exec.ExecInAllDatabases(ctx, + // Quiet the NOTICE from IF EXISTS, and install the pgAudit event triggers. + // - https://www.postgresql.org/docs/current/runtime-config-client.html + // - https://github.com/pgaudit/pgaudit#settings + `SET client_min_messages = WARNING; CREATE EXTENSION IF NOT EXISTS pgaudit;`, + map[string]string{ + "ON_ERROR_STOP": "on", // Abort when any one command fails. + "QUIET": "on", // Do not print successful commands to stdout. + }) + + log.V(1).Info("enabled pgAudit", "stdout", stdout, "stderr", stderr) + + return err +} + +// PostgreSQLParameters sets the parameters required by pgAudit. +func PostgreSQLParameters(outParameters *postgres.Parameters) { + + // Load the shared library when PostgreSQL starts. + // PostgreSQL must be restarted when changing this value. + // - https://github.com/pgaudit/pgaudit#settings + // - https://www.postgresql.org/docs/current/runtime-config-client.html + outParameters.Mandatory.AppendToList("shared_preload_libraries", "pgaudit") +} diff --git a/internal/pgaudit/postgres_test.go b/internal/pgaudit/postgres_test.go new file mode 100644 index 0000000000..3734e511f0 --- /dev/null +++ b/internal/pgaudit/postgres_test.go @@ -0,0 +1,65 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgaudit + +import ( + "context" + "errors" + "io" + "strings" + "testing" + + "gotest.tools/v3/assert" + + "github.com/crunchydata/postgres-operator/internal/postgres" +) + +func TestEnableInPostgreSQL(t *testing.T) { + expected := errors.New("whoops") + exec := func( + _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + assert.Assert(t, stdout != nil, "should capture stdout") + assert.Assert(t, stderr != nil, "should capture stderr") + + assert.Assert(t, strings.Contains(strings.Join(command, "\n"), + `SELECT datname FROM pg_catalog.pg_database`, + ), "expected all databases and templates") + + b, err := io.ReadAll(stdin) + assert.NilError(t, err) + assert.Equal(t, string(b), strings.Trim(` +SET client_min_messages = WARNING; CREATE EXTENSION IF NOT EXISTS pgaudit; + `, "\t\n")) + + return expected + } + + ctx := context.Background() + assert.Equal(t, expected, EnableInPostgreSQL(ctx, exec)) +} + +func TestPostgreSQLParameters(t *testing.T) { + parameters := postgres.Parameters{ + Mandatory: postgres.NewParameterSet(), + } + + // No comma when empty. 
+ PostgreSQLParameters(¶meters) + + assert.Assert(t, parameters.Default == nil) + assert.DeepEqual(t, parameters.Mandatory.AsMap(), map[string]string{ + "shared_preload_libraries": "pgaudit", + }) + + // Appended when not empty. + parameters.Mandatory.Add("shared_preload_libraries", "some,existing") + PostgreSQLParameters(¶meters) + + assert.Assert(t, parameters.Default == nil) + assert.DeepEqual(t, parameters.Mandatory.AsMap(), map[string]string{ + "shared_preload_libraries": "some,existing,pgaudit", + }) +} diff --git a/internal/pgbackrest/assertions_test.go b/internal/pgbackrest/assertions_test.go deleted file mode 100644 index 67d7a3b345..0000000000 --- a/internal/pgbackrest/assertions_test.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pgbackrest - -import ( - "gotest.tools/v3/assert/cmp" - "sigs.k8s.io/yaml" -) - -func marshalEquals(actual interface{}, expected string) cmp.Comparison { - b, err := yaml.Marshal(actual) - return func() cmp.Result { - if err != nil { - return cmp.ResultFromError(err) - } - return cmp.DeepEqual(string(b), expected)() - } -} diff --git a/internal/pgbackrest/certificates.go b/internal/pgbackrest/certificates.go new file mode 100644 index 0000000000..bb2633dfe7 --- /dev/null +++ b/internal/pgbackrest/certificates.go @@ -0,0 +1,129 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package pgbackrest + +import ( + "encoding" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/internal/initialize" +) + +const ( + certAuthorityAbsolutePath = configDirectory + "/" + certAuthorityProjectionPath + certClientPrivateKeyAbsolutePath = configDirectory + "/" + certClientPrivateKeyProjectionPath + certClientAbsolutePath = configDirectory + "/" + certClientProjectionPath + certServerPrivateKeyAbsolutePath = serverMountPath + "/" + certServerPrivateKeyProjectionPath + certServerAbsolutePath = serverMountPath + "/" + certServerProjectionPath + + certAuthorityProjectionPath = "~postgres-operator/tls-ca.crt" + certClientPrivateKeyProjectionPath = "~postgres-operator/client-tls.key" + certClientProjectionPath = "~postgres-operator/client-tls.crt" + certServerPrivateKeyProjectionPath = "server-tls.key" + certServerProjectionPath = "server-tls.crt" + + certAuthoritySecretKey = "pgbackrest.ca-roots" // #nosec G101 this is a name, not a credential + certClientPrivateKeySecretKey = "pgbackrest-client.key" // #nosec G101 this is a name, not a credential + certClientSecretKey = "pgbackrest-client.crt" // #nosec G101 this is a name, not a credential + + certInstancePrivateKeySecretKey = "pgbackrest-server.key" + certInstanceSecretKey = "pgbackrest-server.crt" + + certRepoPrivateKeySecretKey = "pgbackrest-repo-host.key" // #nosec G101 this is a name, not a credential + certRepoSecretKey = "pgbackrest-repo-host.crt" // #nosec G101 this is a name, not a credential +) + +// certFile concatenates the results of multiple PEM-encoding marshalers. +func certFile(texts ...encoding.TextMarshaler) ([]byte, error) { + var out []byte + + for i := range texts { + if b, err := texts[i].MarshalText(); err == nil { + out = append(out, b...) + } else { + return nil, err + } + } + + return out, nil +} + +// clientCertificates returns projections of CAs, keys, and certificates to +// include in a configuration volume from the pgBackRest Secret. +func clientCertificates() []corev1.KeyToPath { + return []corev1.KeyToPath{ + { + Key: certAuthoritySecretKey, + Path: certAuthorityProjectionPath, + }, + { + Key: certClientSecretKey, + Path: certClientProjectionPath, + }, + { + Key: certClientPrivateKeySecretKey, + Path: certClientPrivateKeyProjectionPath, + + // pgBackRest requires that certificate keys not be readable by any + // other user. + // - https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/tls/common.c#L128 + Mode: initialize.Int32(0o600), + }, + } +} + +// clientCommonName returns a client certificate common name (CN) for cluster. +func clientCommonName(cluster metav1.Object) string { + // The common name (ASN.1 OID 2.5.4.3) of a certificate must be + // 64 characters or less. ObjectMeta.UID is a UUID in its 36-character + // string representation. + // - https://tools.ietf.org/html/rfc5280#appendix-A + // - https://docs.k8s.io/concepts/overview/working-with-objects/names/#uids + // - https://releases.k8s.io/v1.22.0/staging/src/k8s.io/apiserver/pkg/registry/rest/create.go#L111 + // - https://releases.k8s.io/v1.22.0/staging/src/k8s.io/apiserver/pkg/registry/rest/meta.go#L30 + return "pgbackrest@" + string(cluster.GetUID()) +} + +// instanceServerCertificates returns projections of keys and certificates to +// include in a server volume from an instance Secret. 
+func instanceServerCertificates() []corev1.KeyToPath { + return []corev1.KeyToPath{ + { + Key: certInstanceSecretKey, + Path: certServerProjectionPath, + }, + { + Key: certInstancePrivateKeySecretKey, + Path: certServerPrivateKeyProjectionPath, + + // pgBackRest requires that certificate keys not be readable by any + // other user. + // - https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/tls/common.c#L128 + Mode: initialize.Int32(0o600), + }, + } +} + +// repositoryServerCertificates returns projections of keys and certificates to +// include in a server volume from the pgBackRest Secret. +func repositoryServerCertificates() []corev1.KeyToPath { + return []corev1.KeyToPath{ + { + Key: certRepoSecretKey, + Path: certServerProjectionPath, + }, + { + Key: certRepoPrivateKeySecretKey, + Path: certServerPrivateKeyProjectionPath, + + // pgBackRest requires that certificate keys not be readable by any + // other user. + // - https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/tls/common.c#L128 + Mode: initialize.Int32(0o600), + }, + } +} diff --git a/internal/pgbackrest/certificates.md b/internal/pgbackrest/certificates.md new file mode 100644 index 0000000000..344616486b --- /dev/null +++ b/internal/pgbackrest/certificates.md @@ -0,0 +1,74 @@ + + +Server +------ + +pgBackRest uses OpenSSL to protect connections between machines. The [TLS server](tls-server.md) +listens on a TCP port, encrypts connections with its server certificate, and +verifies client certificates against a certificate authority. + +- `tls-server-ca-file` is used for client verification. It is the path to a file + of trusted certificates concatenated in PEM format. When this is set, clients + are also authorized according to `tls-server-auth`. + + See https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_load_verify_locations.html + +- `tls-server-cert-file` is the server certificate. It is the path to a file in + PEM format containing the certificate as well as any number of CA certificates + needed to establish its authenticity. + + See https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_use_certificate_chain_file.html + +- `tls-server-key-file` is the server certificate's private key. It is the path + to a file in PEM format. + + See https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_use_PrivateKey_file.html + + +Clients +------- + +pgBackRest uses OpenSSL to protect connections it makes to PostgreSQL instances +and repository hosts. It presents a client certificate that is verified by the +server and must contain a common name (CN) that is authorized according to `tls-server-auth`. + +- `pg-host-ca-file` is used for server verification when connecting to + pgBackRest on a PostgreSQL instance. It is the path to a file of trusted + certificates concatenated in PEM format. + + See https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_load_verify_locations.html + +- `pg-host-cert-file` is the client certificate to present when connecting to + pgBackRest on a PostgreSQL instance. It is the path to a file in PEM format + containing the certificate as well as any number of CA certificates needed to + establish its authenticity. + + See https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_use_certificate_chain_file.html + +- `pg-host-key-file` is the client certificate's private key. It is the path + to a file in PEM format. 
+ + See https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_use_PrivateKey_file.html + +- `repo-host-ca-file` is used for server verification when connecting to + pgBackRest on a repository host. It is the path to a file of trusted + certificates concatenated in PEM format. + + See https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_load_verify_locations.html + +- `repo-host-cert-file` is the client certificate to present when connecting to + pgBackRest on a repository host. It is the path to a file in PEM format + containing the certificate as well as any number of CA certificates needed to + establish its authenticity. + + See https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_use_certificate_chain_file.html + +- `repo-host-key-file` is the client certificate's private key. It is the path + to a file in PEM format. + + See https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_use_PrivateKey_file.html + diff --git a/internal/pgbackrest/certificates_test.go b/internal/pgbackrest/certificates_test.go new file mode 100644 index 0000000000..4ef41b2879 --- /dev/null +++ b/internal/pgbackrest/certificates_test.go @@ -0,0 +1,51 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgbackrest + +import ( + "errors" + "strings" + "testing" + + "gotest.tools/v3/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" +) + +type funcMarshaler func() ([]byte, error) + +func (f funcMarshaler) MarshalText() ([]byte, error) { return f() } + +func TestCertFile(t *testing.T) { + expected := errors.New("boom") + var short funcMarshaler = func() ([]byte, error) { return []byte(`one`), nil } + var fail funcMarshaler = func() ([]byte, error) { return nil, expected } + + text, err := certFile(short, short, short) + assert.NilError(t, err) + assert.DeepEqual(t, text, []byte(`oneoneone`)) + + text, err = certFile(short, fail, short) + assert.Equal(t, err, expected) + assert.DeepEqual(t, text, []byte(nil)) +} + +func TestClientCommonName(t *testing.T) { + t.Parallel() + + cluster := &metav1.ObjectMeta{UID: uuid.NewUUID()} + cn := clientCommonName(cluster) + + assert.Assert(t, cmp.Regexp("^[-[:xdigit:]]{36}$", string(cluster.UID)), + "expected Kubernetes UID to be a UUID string") + + assert.Assert(t, cmp.Regexp("^[[:print:]]{1,64}$", cn), + "expected printable ASCII within 64 characters for %q", cluster) + + assert.Assert(t, strings.HasPrefix(cn, "pgbackrest@"), + `expected %q to begin with "pgbackrest@" for %q`, cn, cluster) +} diff --git a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go index 9789111055..f50b2690ee 100644 --- a/internal/pgbackrest/config.go +++ b/internal/pgbackrest/config.go @@ -1,28 +1,19 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest import ( "context" "fmt" - "sort" + "strconv" + "strings" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/postgres" @@ -30,20 +21,12 @@ import ( ) const ( - // global pgBackRest default log path configuration, used by all three - // default pod configurations - defaultLogPath = "/tmp" - // defaultRepo1Path stores the default pgBackRest repo path defaultRepo1Path = "/pgbackrest/" // DefaultStanzaName is the name of the default pgBackRest stanza DefaultStanzaName = "db" - // configmap key references - cmJobKey = "pgbackrest_job.conf" - cmPrimaryKey = "pgbackrest_primary.conf" - // CMInstanceKey is the name of the pgBackRest configuration file for a PostgreSQL instance CMInstanceKey = "pgbackrest_instance.conf" @@ -51,19 +34,30 @@ const ( // repository host CMRepoKey = "pgbackrest_repo.conf" - // ConfigDir is the pgBackRest configuration directory - ConfigDir = "/etc/pgbackrest/conf.d" + // configDirectory is the pgBackRest configuration directory. + configDirectory = "/etc/pgbackrest/conf.d" + // ConfigHashKey is the name of the file storing the pgBackRest config hash ConfigHashKey = "config-hash" - // ConfigVol is the name of the pgBackRest configuration volume - ConfigVol = "pgbackrest-config" - // configPath is the pgBackRest configuration file path - configPath = "/etc/pgbackrest/pgbackrest.conf" - - // CMNameSuffix is the suffix used with postgrescluster name for associated configmap. - // for instance, if the cluster is named 'mycluster', the - // configmap will be named 'mycluster-pgbackrest-config' - CMNameSuffix = "%s-pgbackrest-config" + + // repoMountPath is where to mount the pgBackRest repo volume. + repoMountPath = "/pgbackrest" + + serverConfigAbsolutePath = configDirectory + "/" + serverConfigProjectionPath + serverConfigProjectionPath = "~postgres-operator_server.conf" + + serverConfigMapKey = "pgbackrest-server.conf" + + // serverMountPath is the directory containing the TLS server certificate + // and key. This is outside of configDirectory so the hash calculated by + // backup jobs does not change when the primary changes. + serverMountPath = "/etc/pgbackrest/server" +) + +const ( + iniGeneratedWarning = "" + + "# Generated by postgres-operator. DO NOT EDIT.\n" + + "# Your changes will not be saved.\n" ) // CreatePGBackRestConfigMapIntent creates a configmap struct with pgBackRest pgbackrest.conf settings in the data field. 
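> Editorial note on the hunk below: each value this function writes into the ConfigMap is the `iniGeneratedWarning` banner followed by rendered INI sections. The following is a minimal standalone sketch of that layout only; it uses a plain map in place of the internal `iniSectionSet`/`iniMultiSet` types, and both their exact rendering and the option values shown are assumptions for illustration.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// banner mirrors the iniGeneratedWarning constant introduced above.
const banner = "" +
	"# Generated by postgres-operator. DO NOT EDIT.\n" +
	"# Your changes will not be saved.\n"

// renderINI is a stand-in for iniSectionSet.String(): one "[section]" header
// per section with "option = value" lines beneath it. The real type also
// supports repeated options; this sketch keeps a single value per option.
func renderINI(sections map[string]map[string]string) string {
	var b strings.Builder
	names := make([]string, 0, len(sections))
	for name := range sections {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		fmt.Fprintf(&b, "\n[%s]\n", name)
		keys := make([]string, 0, len(sections[name]))
		for k := range sections[name] {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			fmt.Fprintf(&b, "%s = %s\n", k, sections[name][k])
		}
	}
	return b.String()
}

func main() {
	// Hypothetical values; the operator derives the real ones from the
	// PostgresCluster spec and naming package.
	instanceConf := banner + renderINI(map[string]map[string]string{
		"global": {"repo1-path": "/pgbackrest/repo1"},
		"db":     {"pg1-path": "/pgdata/pg15", "pg1-port": "5432"},
	})
	// Roughly the shape of cm.Data["pgbackrest_instance.conf"].
	fmt.Print(instanceConf)
}
```

The real intent function builds these values by concatenating the banner with `populatePGInstanceConfigurationMap(...).String()` and, when a repo host exists, `serverConfig(...)` and `populateRepoHostConfigurationMap(...).String()`, as the next hunk shows.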
@@ -73,18 +67,19 @@ const ( // pgbackrest_repo.conf is used by the pgBackRest repository pod func CreatePGBackRestConfigMapIntent(postgresCluster *v1beta1.PostgresCluster, repoHostName, configHash, serviceName, serviceNamespace string, - instanceNames []string) *v1.ConfigMap { + instanceNames []string) *corev1.ConfigMap { meta := naming.PGBackRestConfig(postgresCluster) meta.Annotations = naming.Merge( postgresCluster.Spec.Metadata.GetAnnotationsOrNil(), postgresCluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil()) - meta.Labels = naming.Merge(postgresCluster.Spec.Metadata.GetLabelsOrNil(), + meta.Labels = naming.Merge( + postgresCluster.Spec.Metadata.GetLabelsOrNil(), postgresCluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), naming.PGBackRestConfigLabels(postgresCluster.GetName()), ) - cm := &v1.ConfigMap{ + cm := &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ Kind: "ConfigMap", APIVersion: "v1", @@ -93,23 +88,40 @@ func CreatePGBackRestConfigMapIntent(postgresCluster *v1beta1.PostgresCluster, } // create an empty map for the config data - initialize.StringMap(&cm.Data) + initialize.Map(&cm.Data) - addDedicatedHost := DedicatedRepoHostEnabled(postgresCluster) pgdataDir := postgres.DataDirectory(postgresCluster) // Port will always be populated, since the API will set a default of 5432 if not provided pgPort := *postgresCluster.Spec.Port - cm.Data[CMInstanceKey] = getConfigString( - populatePGInstanceConfigurationMap(serviceName, serviceNamespace, repoHostName, - pgdataDir, pgPort, postgresCluster.Spec.Backups.PGBackRest.Repos, - postgresCluster.Spec.Backups.PGBackRest.Global)) - - if addDedicatedHost && repoHostName != "" { - cm.Data[CMRepoKey] = getConfigString( - populateRepoHostConfigurationMap(serviceName, serviceNamespace, - pgdataDir, pgPort, instanceNames, + cm.Data[CMInstanceKey] = iniGeneratedWarning + + populatePGInstanceConfigurationMap( + serviceName, serviceNamespace, repoHostName, pgdataDir, + config.FetchKeyCommand(&postgresCluster.Spec), + strconv.Itoa(postgresCluster.Spec.PostgresVersion), + pgPort, postgresCluster.Spec.Backups.PGBackRest.Repos, + postgresCluster.Spec.Backups.PGBackRest.Global, + ).String() + + // PostgreSQL instances that have not rolled out expect to mount a server + // config file. Always populate that file so those volumes stay valid and + // Kubernetes propagates their contents to those pods. The repo host name + // given below should always be set, but this guards for cases when it might + // not be. 
+ cm.Data[serverConfigMapKey] = "" + + if repoHostName != "" { + cm.Data[serverConfigMapKey] = iniGeneratedWarning + + serverConfig(postgresCluster).String() + + cm.Data[CMRepoKey] = iniGeneratedWarning + + populateRepoHostConfigurationMap( + serviceName, serviceNamespace, + pgdataDir, config.FetchKeyCommand(&postgresCluster.Spec), + strconv.Itoa(postgresCluster.Spec.PostgresVersion), + pgPort, instanceNames, postgresCluster.Spec.Backups.PGBackRest.Repos, - postgresCluster.Spec.Backups.PGBackRest.Global)) + postgresCluster.Spec.Backups.PGBackRest.Global, + ).String() } cm.Data[ConfigHashKey] = configHash @@ -117,225 +129,318 @@ func CreatePGBackRestConfigMapIntent(postgresCluster *v1beta1.PostgresCluster, return cm } -// configVolumeAndMount creates a volume and mount configuration from the pgBackRest configmap to be used by the postgrescluster -func configVolumeAndMount(pgBackRestConfigMap *v1.ConfigMap, pod *v1.PodSpec, containerName, configKey string) { - // Note: the 'container' string will be 'database' for the PostgreSQL database container, - // otherwise it will be 'backrest' - var ( - pgBackRestConfig []v1.VolumeProjection - ) +// MakePGBackrestLogDir creates the pgBackRest default log path directory used when a +// dedicated repo host is configured. +func MakePGBackrestLogDir(template *corev1.PodTemplateSpec, + cluster *v1beta1.PostgresCluster) { - volume := v1.Volume{Name: ConfigVol} - volume.Projected = &v1.ProjectedVolumeSource{} - - // Add our projections after those specified in the CR. Items later in the - // list take precedence over earlier items (that is, last write wins). - // - https://docs.openshift.com/container-platform/latest/nodes/containers/nodes-containers-projected-volumes.html - // - https://kubernetes.io/docs/concepts/storage/volumes/#projected - volume.Projected.Sources = append( - pgBackRestConfig, - v1.VolumeProjection{ - ConfigMap: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{ - Name: pgBackRestConfigMap.Name, - }, - Items: []v1.KeyToPath{{ - Key: configKey, - Path: configPath, - }}, - }, - }, - ) + var pgBackRestLogPath string + for _, repo := range cluster.Spec.Backups.PGBackRest.Repos { + if repo.Volume != nil { + pgBackRestLogPath = fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) + break + } + } - mount := v1.VolumeMount{ - Name: volume.Name, - MountPath: ConfigDir, - ReadOnly: true, + container := corev1.Container{ + Command: []string{"bash", "-c", "mkdir -p " + pgBackRestLogPath}, + Image: config.PGBackRestContainerImage(cluster), + ImagePullPolicy: cluster.Spec.ImagePullPolicy, + Name: naming.ContainerPGBackRestLogDirInit, + SecurityContext: initialize.RestrictedSecurityContext(), } - pod.Volumes = mergeVolumes(pod.Volumes, volume) + // Set the container resources to the 'pgbackrest' container configuration. + for i, c := range template.Spec.Containers { + if c.Name == naming.PGBackRestRepoContainerName { + container.Resources = template.Spec.Containers[i].Resources + break + } + } + template.Spec.InitContainers = append(template.Spec.InitContainers, container) +} - container := findOrAppendContainer(&pod.Containers, containerName) +// RestoreCommand returns the command for performing a pgBackRest restore. In addition to calling +// the pgBackRest restore command with any pgBackRest options provided, the script also does the +// following: +// - Removes the patroni.dynamic.json file if present. 
This ensures the configuration from the +// cluster being restored from is not utilized when bootstrapping a new cluster, and the +// configuration for the new cluster is utilized instead. +// - Starts the database and allows recovery to complete. A temporary postgresql.conf file +// with the minimum settings needed to safely start the database is created and utilized. +// - Renames the data directory as needed to bootstrap the cluster using the restored database. +// This ensures compatibility with the "existing" bootstrap method that is included in the +// Patroni config when bootstrapping a cluster using an existing data directory. +func RestoreCommand(pgdata, hugePagesSetting, fetchKeyCommand string, tablespaceVolumes []*corev1.PersistentVolumeClaim, args ...string) []string { + + // After pgBackRest restores files, PostgreSQL starts in recovery to finish + // replaying WAL files. "hot_standby" is "on" (by default) so we can detect + // when recovery has finished. In that mode, some parameters cannot be + // smaller than they were when PostgreSQL was backed up. Configure them to + // match the values reported by "pg_controldata". Those parameters are also + // written to WAL files and may change during recovery. When they increase, + // PostgreSQL exits and we reconfigure and restart it. + // For PG14, when some parameters from WAL require a restart, the behavior is + // to pause unless a restart is requested. For this edge case, we run a CASE + // query to check + // (a) if the instance is in recovery; + // (b) if so, if the WAL replay is paused; + // (c) if so, to unpause WAL replay, allowing our expected behavior to resume. + // A note on the PostgreSQL code: we cast `pg_catalog.pg_wal_replay_resume()` as text + // because that method returns a void (which is a non-NULL but empty result). When + // that void is cast as a string, it is an '' + // - https://www.postgresql.org/docs/current/hot-standby.html + // - https://www.postgresql.org/docs/current/app-pgcontroldata.html + + // The postmaster.pid file is removed, if it exists, before attempting a restore. + // This allows the restore to be tried more than once without the causing an + // error due to the presence of the file in subsequent attempts. + + // The 'pg_ctl' timeout is set to a very large value (1 year) to ensure there + // are no timeouts when starting or stopping Postgres. + + tablespaceCmd := "" + for _, tablespaceVolume := range tablespaceVolumes { + tablespaceCmd = tablespaceCmd + fmt.Sprintf( + "\ninstall --directory --mode=0700 '/tablespaces/%s/data'", + tablespaceVolume.Labels[naming.LabelData]) + } - container.VolumeMounts = mergeVolumeMounts(container.VolumeMounts, mount) -} + // If the fetch key command is not empty, save the GUC variable and value + // to a new string. 
+ var ekc string + if fetchKeyCommand != "" { + ekc = ` +encryption_key_command = '` + fetchKeyCommand + `'` + } -// PostgreSQLConfigVolumeAndMount creates a volume and mount configuration from the pgBackRest configmap to be used by the -// postgrescluster's PostgreSQL pod -func PostgreSQLConfigVolumeAndMount(pgBackRestConfigMap *v1.ConfigMap, pod *v1.PodSpec, containerName string) { - configVolumeAndMount(pgBackRestConfigMap, pod, containerName, cmPrimaryKey) -} + restoreScript := `declare -r pgdata="$1" opts="$2" +install --directory --mode=0700 "${pgdata}"` + tablespaceCmd + ` +rm -f "${pgdata}/postmaster.pid" +bash -xc "pgbackrest restore ${opts}" +rm -f "${pgdata}/patroni.dynamic.json" +export PGDATA="${pgdata}" PGHOST='/tmp' + +until [[ "${recovery=}" == 'f' ]]; do +if [[ -z "${recovery}" ]]; then +control=$(pg_controldata) +read -r max_conn <<< "${control##*max_connections setting:}" +read -r max_lock <<< "${control##*max_locks_per_xact setting:}" +read -r max_ptxn <<< "${control##*max_prepared_xacts setting:}" +read -r max_work <<< "${control##*max_worker_processes setting:}" +echo > /tmp/pg_hba.restore.conf 'local all "postgres" peer' +cat > /tmp/postgres.restore.conf <> /tmp/postgres.restore.conf "max_wal_senders = '${max_wals}'" +fi + +pg_ctl start --silent --timeout=31536000 --wait --options='--config-file=/tmp/postgres.restore.conf' +fi + +recovery=$(psql -Atc "SELECT CASE + WHEN NOT pg_catalog.pg_is_in_recovery() THEN false + WHEN NOT pg_catalog.pg_is_wal_replay_paused() THEN true + ELSE pg_catalog.pg_wal_replay_resume()::text = '' +END recovery" && sleep 1) ||: +done + +pg_ctl stop --silent --wait --timeout=31536000 +mv "${pgdata}" "${pgdata}_bootstrap"` -// RepositoryConfigVolumeAndMount creates a volume and mount configuration from the pgBackRest configmap to be used by the -// postgrescluster's pgBackRest repo pod -func RepositoryConfigVolumeAndMount(pgBackRestConfigMap *v1.ConfigMap, pod *v1.PodSpec, containerName string) { - configVolumeAndMount(pgBackRestConfigMap, pod, containerName, CMRepoKey) + return append([]string{"bash", "-ceu", "--", restoreScript, "-", pgdata}, args...) } -// JobConfigVolumeAndMount creates a volume and mount configuration from the pgBackRest configmap to be used by the -// postgrescluster's job pods -func JobConfigVolumeAndMount(pgBackRestConfigMap *v1.ConfigMap, pod *v1.PodSpec, containerName string) { - configVolumeAndMount(pgBackRestConfigMap, pod, containerName, cmJobKey) -} +// DedicatedSnapshotVolumeRestoreCommand returns the command for performing a pgBackRest delta restore +// into a dedicated snapshot volume. In addition to calling the pgBackRest restore command with any +// pgBackRest options provided, the script also removes the patroni.dynamic.json file if present. This +// ensures the configuration from the cluster being restored from is not utilized when bootstrapping a +// new cluster, and the configuration for the new cluster is utilized instead. +func DedicatedSnapshotVolumeRestoreCommand(pgdata string, args ...string) []string { + + // The postmaster.pid file is removed, if it exists, before attempting a restore. + // This allows the restore to be tried more than once without the causing an + // error due to the presence of the file in subsequent attempts. + + // Wrap pgbackrest restore command in backup_label checks. If pre/post + // backup_labels are different, restore moved database forward, so return 0 + // so that the Job is successful and we know to proceed with snapshot. 
+ // Otherwise return 1, Job will fail, and we will not proceed with snapshot. + restoreScript := `declare -r pgdata="$1" opts="$2" +BACKUP_LABEL=$([[ ! -e "${pgdata}/backup_label" ]] || md5sum "${pgdata}/backup_label") +echo "Starting pgBackRest delta restore" -// RestoreCommand returns the command for performing a pgBackRest restore. In addition to calling -// the pgBackRest restore command with any pgBackRest options provided, the script also does the -// following: -// - Removes the patroni.dynamic.json file if present. This ensures the configuration from the -// cluster being restored from is not utilized when bootstrapping a new cluster, and the -// configuration for the new cluster is utilized instead. -// - Starts the database and allows recovery to complete. A temporary postgresql.conf file -// with the minimum settings needed to safely start the database is created and utilized. -// - Renames the data directory as needed to bootstrap the cluster using the restored database. -// This ensures compatibility with the "existing" bootstrap method that is included in the -// Patroni config when bootstrapping a cluster using an existing data directory. -func RestoreCommand(pgdata string, args ...string) []string { - - const restoreScript = `declare -r pgdata="$1" opts="$2" install --directory --mode=0700 "${pgdata}" -eval "pgbackrest restore ${opts}" +rm -f "${pgdata}/postmaster.pid" +bash -xc "pgbackrest restore ${opts}" rm -f "${pgdata}/patroni.dynamic.json" -echo "unix_socket_directories = '/tmp'" > /tmp/postgres.restore.conf -echo "archive_command = 'false'" >> /tmp/postgres.restore.conf -echo "archive_mode = 'on'" >> /tmp/postgres.restore.conf -pg_ctl start -D "${pgdata}" -o "--config-file=/tmp/postgres.restore.conf" -until [[ $(psql -At -c "SELECT pg_catalog.pg_is_in_recovery()") == "f" ]]; do sleep 1; done -pg_ctl stop -D "${pgdata}" -mv "${pgdata}" "${pgdata}_bootstrap"` + +BACKUP_LABEL_POST=$([[ ! -e "${pgdata}/backup_label" ]] || md5sum "${pgdata}/backup_label") +if [[ "${BACKUP_LABEL}" != "${BACKUP_LABEL_POST}" ]] +then + exit 0 +fi +echo Database was not advanced by restore. No snapshot will be taken. +echo Check that your last backup was successful. +exit 1` return append([]string{"bash", "-ceu", "--", restoreScript, "-", pgdata}, args...) } -// populatePGInstanceConfigurationMap returns a map representing the pgBackRest configuration for +// populatePGInstanceConfigurationMap returns options representing the pgBackRest configuration for // a PostgreSQL instance -func populatePGInstanceConfigurationMap(serviceName, serviceNamespace, repoHostName, pgdataDir string, +func populatePGInstanceConfigurationMap( + serviceName, serviceNamespace, repoHostName, pgdataDir, + fetchKeyCommand, postgresVersion string, pgPort int32, repos []v1beta1.PGBackRestRepo, - globalConfig map[string]string) map[string]map[string]string { + globalConfig map[string]string, +) iniSectionSet { - pgBackRestConfig := map[string]map[string]string{ + // TODO(cbandy): pass a FQDN in already. + repoHostFQDN := repoHostName + "-0." + + serviceName + "." + serviceNamespace + ".svc." + + naming.KubernetesClusterDomain(context.Background()) - // will hold the [global] configs - "global": {}, - // will hold the [stanza-name] configs - "stanza": {}, - } + global := iniMultiSet{} + stanza := iniMultiSet{} - // set the default stanza name - pgBackRestConfig["stanza"]["name"] = DefaultStanzaName + // For faster and more robust WAL archiving, we turn on pgBackRest archive-async. 
+ global.Set("archive-async", "y") + // pgBackRest spool-path should always be co-located with the Postgres WAL path. + global.Set("spool-path", "/pgdata/pgbackrest-spool") + // pgBackRest will log to the pgData volume for commands run on the PostgreSQL instance + global.Set("log-path", naming.PGBackRestPGDataLogPath) - // set global settings, which includes all repos - pgBackRestConfig["global"]["log-path"] = defaultLogPath for _, repo := range repos { - - repoConfigs := make(map[string]string) + global.Set(repo.Name+"-path", defaultRepo1Path+repo.Name) // repo volumes do not contain configuration (unlike other repo types which has actual // pgBackRest settings such as "bucket", "region", etc.), so only grab the name from the // repo if a Volume is detected, and don't attempt to get an configs if repo.Volume == nil { - repoConfigs = getExternalRepoConfigs(repo) + for option, val := range getExternalRepoConfigs(repo) { + global.Set(option, val) + } } // Only "volume" (i.e. PVC-based) repos should ever have a repo host configured. This // means cloud-based repos (S3, GCS or Azure) should not have a repo host configured. if repoHostName != "" && repo.Volume != nil { - pgBackRestConfig["global"][repo.Name+"-host"] = repoHostName + "-0." + serviceName + - "." + serviceNamespace + ".svc." + - naming.KubernetesClusterDomain(context.Background()) - pgBackRestConfig["global"][repo.Name+"-host-user"] = "postgres" - } - pgBackRestConfig["global"][repo.Name+"-path"] = defaultRepo1Path + repo.Name - - for option, val := range repoConfigs { - pgBackRestConfig["global"][option] = val + global.Set(repo.Name+"-host", repoHostFQDN) + global.Set(repo.Name+"-host-type", "tls") + global.Set(repo.Name+"-host-ca-file", certAuthorityAbsolutePath) + global.Set(repo.Name+"-host-cert-file", certClientAbsolutePath) + global.Set(repo.Name+"-host-key-file", certClientPrivateKeyAbsolutePath) + global.Set(repo.Name+"-host-user", "postgres") } } for option, val := range globalConfig { - pgBackRestConfig["global"][option] = val + global.Set(option, val) } // Now add the local PG instance to the stanza section. 
The local PG host must always be // index 1: https://github.com/pgbackrest/pgbackrest/issues/1197#issuecomment-708381800 - pgBackRestConfig["stanza"]["pg1-path"] = pgdataDir - pgBackRestConfig["stanza"]["pg1-port"] = fmt.Sprint(pgPort) - pgBackRestConfig["stanza"]["pg1-socket-path"] = postgres.SocketDirectory + stanza.Set("pg1-path", pgdataDir) + stanza.Set("pg1-port", fmt.Sprint(pgPort)) + stanza.Set("pg1-socket-path", postgres.SocketDirectory) + + if fetchKeyCommand != "" { + stanza.Set("archive-header-check", "n") + stanza.Set("page-header-check", "n") + stanza.Set("pg-version-force", postgresVersion) + } - return pgBackRestConfig + return iniSectionSet{ + "global": global, + DefaultStanzaName: stanza, + } } -// populateRepoHostConfigurationMap returns a map representing the pgBackRest configuration for +// populateRepoHostConfigurationMap returns options representing the pgBackRest configuration for // a pgBackRest dedicated repository host -func populateRepoHostConfigurationMap(serviceName, serviceNamespace, pgdataDir string, +func populateRepoHostConfigurationMap( + serviceName, serviceNamespace, pgdataDir, + fetchKeyCommand, postgresVersion string, pgPort int32, pgHosts []string, repos []v1beta1.PGBackRestRepo, - globalConfig map[string]string) map[string]map[string]string { + globalConfig map[string]string, +) iniSectionSet { - pgBackRestConfig := map[string]map[string]string{ + global := iniMultiSet{} + stanza := iniMultiSet{} - // will hold the [global] configs - "global": {}, - // will hold the [stanza-name] configs - "stanza": {}, - } - - // set the default stanza name - pgBackRestConfig["stanza"]["name"] = DefaultStanzaName - - // set the config for the local repo host - pgBackRestConfig["global"]["log-path"] = defaultLogPath + var pgBackRestLogPathSet bool for _, repo := range repos { - var repoConfigs map[string]string + global.Set(repo.Name+"-path", defaultRepo1Path+repo.Name) // repo volumes do not contain configuration (unlike other repo types which has actual // pgBackRest settings such as "bucket", "region", etc.), so only grab the name from the // repo if a Volume is detected, and don't attempt to get an configs if repo.Volume == nil { - repoConfigs = getExternalRepoConfigs(repo) + for option, val := range getExternalRepoConfigs(repo) { + global.Set(option, val) + } } - pgBackRestConfig["global"][repo.Name+"-path"] = defaultRepo1Path + repo.Name - for option, val := range repoConfigs { - pgBackRestConfig["global"][option] = val + if !pgBackRestLogPathSet && repo.Volume != nil { + // pgBackRest will log to the first configured repo volume when commands + // are run on the pgBackRest repo host. With our previous check in + // RepoHostVolumeDefined(), we've already validated that at least one + // defined repo has a volume. + global.Set("log-path", fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name)) + pgBackRestLogPathSet = true } } + // If no log path was set, don't log because the default path is not writable. + if !pgBackRestLogPathSet { + global.Set("log-level-file", "off") + } + for option, val := range globalConfig { - pgBackRestConfig["global"][option] = val + global.Set(option, val) } // set the configs for all PG hosts for i, pgHost := range pgHosts { - pgBackRestConfig["stanza"][fmt.Sprintf("pg%d-host", i+1)] = pgHost + "-0." + serviceName + - "." + serviceNamespace + ".svc." + + // TODO(cbandy): pass a FQDN in already. + pgHostFQDN := pgHost + "-0." + + serviceName + "." + serviceNamespace + ".svc." 
+ naming.KubernetesClusterDomain(context.Background()) - pgBackRestConfig["stanza"][fmt.Sprintf("pg%d-path", i+1)] = pgdataDir - pgBackRestConfig["stanza"][fmt.Sprintf("pg%d-port", i+1)] = fmt.Sprint(pgPort) - pgBackRestConfig["stanza"][fmt.Sprintf("pg%d-socket-path", i+1)] = postgres.SocketDirectory - } - return pgBackRestConfig -} + stanza.Set(fmt.Sprintf("pg%d-host", i+1), pgHostFQDN) + stanza.Set(fmt.Sprintf("pg%d-host-type", i+1), "tls") + stanza.Set(fmt.Sprintf("pg%d-host-ca-file", i+1), certAuthorityAbsolutePath) + stanza.Set(fmt.Sprintf("pg%d-host-cert-file", i+1), certClientAbsolutePath) + stanza.Set(fmt.Sprintf("pg%d-host-key-file", i+1), certClientPrivateKeyAbsolutePath) -// getConfigString provides a formatted string of the desired -// pgBackRest configuration for insertion into the relevant -// configmap -func getConfigString(c map[string]map[string]string) string { + stanza.Set(fmt.Sprintf("pg%d-path", i+1), pgdataDir) + stanza.Set(fmt.Sprintf("pg%d-port", i+1), fmt.Sprint(pgPort)) + stanza.Set(fmt.Sprintf("pg%d-socket-path", i+1), postgres.SocketDirectory) - configString := fmt.Sprintln("[global]") - for _, k := range sortedKeys(c["global"]) { - configString += fmt.Sprintf("%s=%s\n", k, c["global"][k]) + if fetchKeyCommand != "" { + stanza.Set("archive-header-check", "n") + stanza.Set("page-header-check", "n") + stanza.Set("pg-version-force", postgresVersion) + } } - if c["stanza"]["name"] != "" { - configString += fmt.Sprintf("\n[%s]\n", c["stanza"]["name"]) - - for _, k := range sortedKeys(c["stanza"]) { - if k != "name" { - configString += fmt.Sprintf("%s=%s\n", k, c["stanza"][k]) - } - } + return iniSectionSet{ + "global": global, + DefaultStanzaName: stanza, } - return configString } // getExternalRepoConfigs returns a map containing the configuration settings for an external @@ -360,13 +465,112 @@ func getExternalRepoConfigs(repo v1beta1.PGBackRestRepo) map[string]string { return repoConfigs } -// sortedKeys sorts and returns the keys from a given map -func sortedKeys(m map[string]string) []string { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) +// reloadCommand returns an entrypoint that convinces the pgBackRest TLS server +// to reload its options and certificate files when they change. The process +// will appear as name in `ps` and `top`. +func reloadCommand(name string) []string { + // Use a Bash loop to periodically check the mtime of the mounted server + // volume and configuration file. When either changes, signal pgBackRest + // and print the observed timestamp. + // + // We send SIGHUP because this allows the TLS server configuration to be + // reloaded starting in pgBackRest 2.37. We filter by parent process to ignore + // the forked connection handlers. The server parent process is zero because + // it is started by Kubernetes. + // - https://github.com/pgbackrest/pgbackrest/commit/7b3ea883c7c010aafbeb14d150d073a113b703e4 + + // Coreutils `sleep` uses a lot of memory, so the following opens a file + // descriptor and uses the timeout of the builtin `read` to wait. That same + // descriptor gets closed and reopened to use the builtin `[ -nt` to check + // mtimes. 
+ // - https://unix.stackexchange.com/a/407383 + const script = ` +exec {fd}<> <(:||:) +until read -r -t 5 -u "${fd}"; do + if + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --dereference --format='Loaded configuration dated %y' "${filename}" + elif + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] + } && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded certificates dated %y' "${directory}" + fi +done +` + + // Elide the above script from `ps` and `top` by wrapping it in a function + // and calling that. + wrapper := `monitor() {` + script + `};` + + ` export directory="$1" authority="$2" filename="$3"; export -f monitor;` + + ` exec -a "$0" bash -ceu monitor` + + return []string{"bash", "-ceu", "--", wrapper, name, + serverMountPath, certAuthorityAbsolutePath, serverConfigAbsolutePath} +} + +// serverConfig returns the options needed to run the TLS server for cluster. +func serverConfig(cluster *v1beta1.PostgresCluster) iniSectionSet { + global := iniMultiSet{} + server := iniMultiSet{} + + // IPv6 support is a relatively recent addition to Kubernetes, so listen on + // the IPv4 wildcard address and trust that Pod DNS names will resolve to + // IPv4 addresses for now. + // + // NOTE(cbandy): The unspecified IPv6 address, which ends up being the IPv6 + // wildcard address, did not work in all environments. In some cases, the + // "server-ping" command would not connect. + // - https://tools.ietf.org/html/rfc3493#section-3.8 + // + // TODO(cbandy): When pgBackRest provides a way to bind to all addresses, + // use that here and configure "server-ping" to use "localhost" which + // Kubernetes guarantees resolves to a loopback address. + // - https://kubernetes.io/docs/concepts/cluster-administration/networking/ + // - https://releases.k8s.io/v1.18.0/pkg/kubelet/kubelet_pods.go#L327 + // - https://releases.k8s.io/v1.23.0/pkg/kubelet/kubelet_pods.go#L345 + global.Set("tls-server-address", "0.0.0.0") + + // NOTE (dsessler7): As pointed out by Chris above, there is an issue in + // pgBackRest (#1841), where using a wildcard address to bind all addresses + // does not work in certain IPv6 environments. Until this is fixed, we are + // going to workaround the issue by allowing the user to add an annotation to + // enable IPv6. We will check for that annotation here and override the + // "tls-server-address" setting accordingly. + if strings.EqualFold(cluster.Annotations[naming.PGBackRestIPVersion], "ipv6") { + global.Set("tls-server-address", "::") } - sort.Strings(keys) - return keys + // The client certificate for this cluster is allowed to connect for any stanza. + // Without the wildcard "*", the "pgbackrest info" and "pgbackrest repo-ls" + // commands fail with "access denied" when invoked without a "--stanza" flag. + global.Add("tls-server-auth", clientCommonName(cluster)+"=*") + + global.Set("tls-server-ca-file", certAuthorityAbsolutePath) + global.Set("tls-server-cert-file", certServerAbsolutePath) + global.Set("tls-server-key-file", certServerPrivateKeyAbsolutePath) + + // Send all server logs to stderr and stdout without timestamps. + // - stderr has ERROR messages + // - stdout has WARN, INFO, and DETAIL messages + // + // The "trace" level shows when a connection is accepted, but nothing about + // the remote address or what commands it might send. 
+ // - https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/server/server.c#L158-L159 + // - https://pgbackrest.org/configuration.html#section-log + server.Set("log-level-console", "detail") + server.Set("log-level-stderr", "error") + server.Set("log-level-file", "off") + server.Set("log-timestamp", "n") + + return iniSectionSet{ + "global": global, + "global:server": server, + } } diff --git a/internal/pgbackrest/config.md b/internal/pgbackrest/config.md index 67c7b58833..2101535b3a 100644 --- a/internal/pgbackrest/config.md +++ b/internal/pgbackrest/config.md @@ -1,16 +1,7 @@ # pgBackRest Configuration Overview @@ -25,21 +16,23 @@ During initial cluster creation, four pgBackRest use cases are involved. These settings are configured in either the [global] or [stanza] sections of the pgBackRest configuration based on their designation in the pgBackRest code. For more information on the above, and other settings, please see -https://github.com/pgbackrest/pgbackrest/blob/master/src/config/parse.auto.c +https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c As shown, the settings with the `cfgSectionGlobal` designation are `log-path`: The log path provides a location for pgBackRest to store log files. +`log-level-file`: Level for file logging. Set to 'off' when the repo host has no volume. + `repo-path`: Path where backups and archive are stored. The repository is where pgBackRest stores backups and archives WAL segments. -`repo-host`: Repository host when operating remotely via SSH. +`repo-host`: Repository host when operating remotely via TLS. The settings with the `cfgSectionStanza` designation are -`pg-host`: PostgreSQL host for operating remotely via SSH. +`pg-host`: PostgreSQL host for operating remotely via TLS. `pg-path`: The path of the PostgreSQL data directory. This should be the same as the data_directory setting in postgresql.conf. @@ -75,6 +68,7 @@ pg1-socket-path [global] log-path repo1-path +log-level-file [stanza] pg1-host @@ -127,3 +121,140 @@ above MUST BE CONFIGURED VIA THE POSTGRESCLUSTER SPEC so as to avoid errors. For more information, please see `https://pgbackrest.org/user-guide.html#quickstart/configure-stanza`. + +--- + +There are three ways to configure pgBackRest: INI files, environment variables, +and command-line arguments. Any particular option comes from exactly one of those +places. For example, when an option is in an INI file and a command-line argument, +only the command-line argument is used. This is true even for options that can +be specified more than once. The [precedence](https://pgbackrest.org/command.html#introduction): + +> Command-line options override environment options which override config file options. + +From one of those places, only a handful of options may be set more than once +(see `PARSE_RULE_OPTION_MULTI` in [parse.auto.c][]). The resulting value of +these options matches the order in which they were loaded: left-to-right on the +command-line or top-to-bottom in INI files. + +The remaining options must be set exactly once. `pgbackrest` exits non-zero when +the option occurs twice on the command-line or twice in a file: + +``` +ERROR: [031]: option 'io-timeout' cannot be set multiple times +``` + +A few options are only allowed in certain places. Credentials, for example, +cannot be passed as command-line arguments (see `PARSE_RULE_OPTION_SECURE` in [parse.auto.c][]). +Some others cannot be in INI files (see `cfgSectionCommandLine` in [parse.auto.c][]). 
+Notably, these must be environment variables or command-line arguments: + +- `--repo` and `--stanza` +- restore `--target` and `--target-action` +- backup and restore `--type` + +pgBackRest looks for and loads multiple INI files from multiple places according +to the `config`, `config-include-path`, and/or `config-path` options. The order +is a [little complicated][file-precedence]. When none of these options are set: + + 1. One of `/etc/pgbackrest/pgbackrest.conf` or `/etc/pgbackrest.conf` is read + in that order, [whichever exists][default-config]. + 2. All `/etc/pgbackrest/conf.d/*.conf` files that exist are read in alphabetical order. + +There is no "precedence" between these files; they do not "override" each other. +Options that can be set multiple times are interpreted as each file is loaded. +Options that cannot be set multiple times will error when they are in multiple files. + +There *is* precedence, however, *inside* these files, organized by INI sections. + +- The "global" section applies to all repositories, stanzas, and commands. +- The "global:*command*" section applies to all repositories and stanzas for a particular command. +- The "*stanza*" section applies to all repositories and commands for a particular stanza. +- The "*stanza*:*command*" section applies to all repositories for a particular stanza and command. + +Options in more specific sections (lower in the list) [override][file-precedence] +options in less specific sections. + +[default-config]: https://pgbackrest.org/configuration.html#introduction +[file-precedence]: https://pgbackrest.org/user-guide.html#quickstart/configure-stanza +[parse.auto.c]: https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c + +```console +$ tail -vn+0 pgbackrest.conf conf.d/* +==> pgbackrest.conf <== +[global] +exclude = main +exclude = main +io-timeout = 10 +link-map = x=x1 +link-map = x=x2 +link-map = y=y1 + +[global:backup] +io-timeout = 20 + +[db] +io-timeout = 30 +link-map = y=y2 + +[db:backup] +io-timeout = 40 + +==> conf.d/one.conf <== +[global] +exclude = one + +==> conf.d/two.conf <== +[global] +exclude = two + +==> conf.d/!three.conf <== +[global] +exclude = three + +==> conf.d/~four.conf <== +[global] +exclude = four + +$ pgbackrest --config-path="$(pwd)" help backup | grep -A1 exclude + --exclude exclude paths/files from the backup + [current=main, main, three, one, two, four] + +$ pgbackrest --config-path="$(pwd)" help backup --exclude=five | grep -A1 exclude + --exclude exclude paths/files from the backup + [current=five] + +$ pgbackrest --config-path="$(pwd)" help backup | grep io-timeout + --io-timeout I/O timeout [current=20, default=60] + +$ pgbackrest --config-path="$(pwd)" help backup --stanza=db | grep io-timeout + --io-timeout I/O timeout [current=40, default=60] + +$ pgbackrest --config-path="$(pwd)" help info | grep io-timeout + --io-timeout I/O timeout [current=10, default=60] + +$ pgbackrest --config-path="$(pwd)" help info --stanza=db | grep io-timeout + --io-timeout I/O timeout [current=30, default=60] + +$ pgbackrest --config-path="$(pwd)" help restore | grep -A1 link-map + --link-map modify the destination of a symlink + [current=x=x2, y=y1] + +$ pgbackrest --config-path="$(pwd)" help restore --stanza=db | grep -A1 link-map + --link-map modify the destination of a symlink + [current=y=y2] +``` + +--- + +Given all the above, we configure pgBackRest using files mounted into the +`/etc/pgbackrest/conf.d` directory. 
They are last in the projected volume to +ensure they take precedence over other projections. + +- `/etc/pgbackrest/conf.d`
+ Use this directory to store pgBackRest configuration. Files ending with `.conf` + are loaded in alphabetical order. + +- `/etc/pgbackrest/conf.d/~postgres-operator/*`
+ Use this subdirectory to store things like TLS certificates and keys. Files in + subdirectories are not loaded automatically. diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index 4ed1951f63..b74bf9a4a8 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -1,323 +1,439 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest import ( "context" - "io/ioutil" + "os" "os/exec" "path/filepath" - "reflect" - "strconv" "strings" "testing" "gotest.tools/v3/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" - "sigs.k8s.io/controller-runtime/pkg/client" + "gotest.tools/v3/assert/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// TestPGBackRestConfiguration goes through the various steps of the current -// pgBackRest configuration setup and verifies the expected values are set in -// the expected configmap and volumes -func TestPGBackRestConfiguration(t *testing.T) { +func TestCreatePGBackRestConfigMapIntent(t *testing.T) { + cluster := v1beta1.PostgresCluster{} + cluster.Namespace = "ns1" + cluster.Name = "hippo-dance" - // set cluster name and namespace values in postgrescluster spec - postgresCluster := &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: testclustername, - Namespace: "postgres-operator-test-" + rand.String(6), - }, - Spec: v1beta1.PostgresClusterSpec{ - PostgresVersion: 12, - Port: initialize.Int32(2345), - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{ - Global: map[string]string{"repo2-test": "config", "repo4-test": "config", - "repo3-test": "config"}, - // By defining a "Volume" repo a dedicated repo host will be enabled - Repos: []v1beta1.PGBackRestRepo{{ - Name: "repo1", - Volume: &v1beta1.RepoPVC{}, - }, { - Name: "repo2", - Azure: &v1beta1.RepoAzure{ - Container: "container", - }, - }, { - Name: "repo3", - GCS: &v1beta1.RepoGCS{ - Bucket: "bucket", - }, - }, { - Name: "repo4", - S3: &v1beta1.RepoS3{ - Bucket: "bucket", - Endpoint: "endpoint", - Region: "region", - }, - }}, - }, - }, - }, - } - - // the initially created configmap - var cmInitial *v1.ConfigMap - // the returned configmap - var cmReturned v1.ConfigMap - // pod spec for testing projected volumes and volume mounts - pod := &v1.PodSpec{} - - testInstanceName := "test-instance-abc" - testRepoName := "repo-host" - testConfigHash := "abcde12345" + cluster.Spec.Port = initialize.Int32(2345) + cluster.Spec.PostgresVersion = 12 domain := 
naming.KubernetesClusterDomain(context.Background()) - t.Run("pgbackrest configmap checks", func(t *testing.T) { + t.Run("NoVolumeRepo", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = nil - // setup the test environment and ensure a clean teardown - testEnv, testClient := setupTestEnv(t) - - // define the cleanup steps to run once the tests complete - t.Cleanup(func() { - teardownTestEnv(t, testEnv) - }) - - t.Run("create pgbackrest configmap struct", func(t *testing.T) { - // create an array of one host string value - pghosts := []string{testInstanceName} - // create the configmap struct - cmInitial = CreatePGBackRestConfigMapIntent(postgresCluster, testRepoName, - testConfigHash, naming.ClusterPodService(postgresCluster).Name, "test-ns", pghosts) - - // check that there is configmap data - assert.Assert(t, cmInitial.Data != nil) - }) + configmap := CreatePGBackRestConfigMapIntent(cluster, + "", "number", "pod-service-name", "test-ns", + []string{"some-instance"}) - t.Run("create pgbackrest configmap", func(t *testing.T) { - - ns := &v1.Namespace{} - ns.Name = naming.PGBackRestConfig(postgresCluster).Namespace - assert.NilError(t, testClient.Create(context.Background(), ns)) - t.Cleanup(func() { assert.Check(t, testClient.Delete(context.Background(), ns)) }) - - // create the configmap - err := testClient.Patch(context.Background(), cmInitial, client.Apply, client.ForceOwnership, client.FieldOwner(testFieldOwner)) - - assert.NilError(t, err) - }) - - t.Run("get pgbackrest configmap", func(t *testing.T) { + assert.Equal(t, configmap.Data["config-hash"], "number") + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], "") + }) - objectKey := client.ObjectKey{ - Namespace: naming.PGBackRestConfig(postgresCluster).Namespace, - Name: naming.PGBackRestConfig(postgresCluster).Name, - } + t.Run("DedicatedRepoHost", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Global = map[string]string{ + "repo3-test": "something", + } + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + { + Name: "repo2", + Azure: &v1beta1.RepoAzure{Container: "a-container"}, + }, + { + Name: "repo3", + GCS: &v1beta1.RepoGCS{Bucket: "g-bucket"}, + }, + { + Name: "repo4", + S3: &v1beta1.RepoS3{ + Bucket: "s-bucket", Endpoint: "endpoint-s", Region: "earth", + }, + }, + } - err := testClient.Get(context.Background(), objectKey, &cmReturned) + configmap := CreatePGBackRestConfigMapIntent(cluster, + "repo-hostname", "abcde12345", "pod-service-name", "test-ns", + []string{"some-instance"}) - assert.NilError(t, err) + assert.DeepEqual(t, configmap.Annotations, map[string]string{}) + assert.DeepEqual(t, configmap.Labels, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "hippo-dance", + "postgres-operator.crunchydata.com/pgbackrest": "", + "postgres-operator.crunchydata.com/pgbackrest-config": "", }) - // finally, verify initial and returned match - assert.Assert(t, reflect.DeepEqual(cmInitial.Data, cmReturned.Data)) - - }) + assert.Equal(t, configmap.Data["config-hash"], "abcde12345") + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. 
+ +[global] +log-path = /pgbackrest/repo1/log +repo1-path = /pgbackrest/repo1 +repo2-azure-container = a-container +repo2-path = /pgbackrest/repo2 +repo2-type = azure +repo3-gcs-bucket = g-bucket +repo3-path = /pgbackrest/repo3 +repo3-test = something +repo3-type = gcs +repo4-path = /pgbackrest/repo4 +repo4-s3-bucket = s-bucket +repo4-s3-endpoint = endpoint-s +repo4-s3-region = earth +repo4-type = s3 - t.Run("check pgbackrest configmap repo configuration", func(t *testing.T) { - - assert.Equal(t, getCMData(cmReturned, CMRepoKey), - `[global] -log-path=/tmp -repo1-path=/pgbackrest/repo1 -repo2-azure-container=container -repo2-path=/pgbackrest/repo2 -repo2-test=config -repo2-type=azure -repo3-gcs-bucket=bucket -repo3-path=/pgbackrest/repo3 -repo3-test=config -repo3-type=gcs -repo4-path=/pgbackrest/repo4 -repo4-s3-bucket=bucket -repo4-s3-endpoint=endpoint -repo4-s3-region=region -repo4-test=config -repo4-type=s3 +[db] +pg1-host = some-instance-0.pod-service-name.test-ns.svc.`+domain+` +pg1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +pg1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +pg1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +pg1-host-type = tls +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_instance.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +archive-async = y +log-path = /pgdata/pgbackrest/log +repo1-host = repo-hostname-0.pod-service-name.test-ns.svc.`+domain+` +repo1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +repo1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +repo1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +repo1-host-type = tls +repo1-host-user = postgres +repo1-path = /pgbackrest/repo1 +repo2-azure-container = a-container +repo2-path = /pgbackrest/repo2 +repo2-type = azure +repo3-gcs-bucket = g-bucket +repo3-path = /pgbackrest/repo3 +repo3-test = something +repo3-type = gcs +repo4-path = /pgbackrest/repo4 +repo4-s3-bucket = s-bucket +repo4-s3-endpoint = endpoint-s +repo4-s3-region = earth +repo4-type = s3 +spool-path = /pgdata/pgbackrest-spool [db] -pg1-host=`+testInstanceName+`-0.testcluster-pods.test-ns.svc.`+domain+` -pg1-path=/pgdata/pg`+strconv.Itoa(postgresCluster.Spec.PostgresVersion)+` -pg1-port=2345 -pg1-socket-path=/tmp/postgres -`) +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") }) - t.Run("check pgbackrest configmap instance configuration", func(t *testing.T) { - - assert.Equal(t, getCMData(cmReturned, CMInstanceKey), - `[global] -log-path=/tmp -repo1-host=`+testRepoName+`-0.testcluster-pods.test-ns.svc.`+domain+` -repo1-host-user=postgres -repo1-path=/pgbackrest/repo1 -repo2-azure-container=container -repo2-path=/pgbackrest/repo2 -repo2-test=config -repo2-type=azure -repo3-gcs-bucket=bucket -repo3-path=/pgbackrest/repo3 -repo3-test=config -repo3-type=gcs -repo4-path=/pgbackrest/repo4 -repo4-s3-bucket=bucket -repo4-s3-endpoint=endpoint -repo4-s3-region=region -repo4-test=config -repo4-type=s3 + t.Run("CustomMetadata", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Metadata = &v1beta1.Metadata{ + Annotations: map[string]string{ + "ak1": "cluster-av1", + "ak2": "cluster-av2", + }, + Labels: map[string]string{ + "lk1": "cluster-lv1", + "lk2": "cluster-lv2", -[db] 
-pg1-path=/pgdata/pg`+strconv.Itoa(postgresCluster.Spec.PostgresVersion)+` -pg1-port=2345 -pg1-socket-path=/tmp/postgres -`) - }) + "postgres-operator.crunchydata.com/cluster": "cluster-ignored", + }, + } + cluster.Spec.Backups.PGBackRest.Metadata = &v1beta1.Metadata{ + Annotations: map[string]string{ + "ak2": "backups-av2", + "ak3": "backups-av3", + }, + Labels: map[string]string{ + "lk2": "backups-lv2", + "lk3": "backups-lv3", - t.Run("check primary config volume", func(t *testing.T) { + "postgres-operator.crunchydata.com/cluster": "backups-ignored", + }, + } - PostgreSQLConfigVolumeAndMount(&cmReturned, pod, "database") + configmap := CreatePGBackRestConfigMapIntent(cluster, + "any", "any", "any", "any", nil) - assert.Assert(t, simpleMarshalContains(&pod.Volumes, strings.TrimSpace(` - - name: pgbackrest-config - projected: - sources: - - configMap: - items: - - key: pgbackrest_primary.conf - path: /etc/pgbackrest/pgbackrest.conf - name: `+postgresCluster.GetName()+`-pgbackrest-config - `)+"\n")) + assert.DeepEqual(t, configmap.Annotations, map[string]string{ + "ak1": "cluster-av1", + "ak2": "backups-av2", + "ak3": "backups-av3", + }) + assert.DeepEqual(t, configmap.Labels, map[string]string{ + "lk1": "cluster-lv1", + "lk2": "backups-lv2", + "lk3": "backups-lv3", + + "postgres-operator.crunchydata.com/cluster": "hippo-dance", + "postgres-operator.crunchydata.com/pgbackrest": "", + "postgres-operator.crunchydata.com/pgbackrest-config": "", + }) }) - t.Run("check primary config volume mount", func(t *testing.T) { + t.Run("EnabledTDE", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "encryption_key_command": "echo test", + }, + }, + }, + } + + configmap := CreatePGBackRestConfigMapIntent(cluster, + "", "number", "pod-service-name", "test-ns", + []string{"some-instance"}) + + assert.Assert(t, + strings.Contains(configmap.Data["pgbackrest_instance.conf"], + "archive-header-check = n")) + assert.Assert(t, + strings.Contains(configmap.Data["pgbackrest_instance.conf"], + "page-header-check = n")) + assert.Assert(t, + strings.Contains(configmap.Data["pgbackrest_instance.conf"], + "pg-version-force")) + + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + + configmap = CreatePGBackRestConfigMapIntent(cluster, + "repo1", "number", "pod-service-name", "test-ns", + []string{"some-instance"}) + + assert.Assert(t, + strings.Contains(configmap.Data["pgbackrest_repo.conf"], + "archive-header-check = n")) + assert.Assert(t, + strings.Contains(configmap.Data["pgbackrest_repo.conf"], + "page-header-check = n")) + assert.Assert(t, + strings.Contains(configmap.Data["pgbackrest_repo.conf"], + "pg-version-force")) + }) +} - PostgreSQLConfigVolumeAndMount(&cmReturned, pod, "database") +func TestMakePGBackrestLogDir(t *testing.T) { + podTemplate := &corev1.PodTemplateSpec{Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "test"}, + }, + Containers: []corev1.Container{ + {Name: "pgbackrest", + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("23m"), + }, + }, + }, + }}} - container := findOrAppendContainer(&pod.Containers, "database") + cluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + ImagePullPolicy: corev1.PullAlways, + Backups: v1beta1.Backups{ + PGBackRest: 
v1beta1.PGBackRestArchive{ + Image: "test-image", + Repos: []v1beta1.PGBackRestRepo{ + {Name: "repo1"}, + {Name: "repo2", + Volume: &v1beta1.RepoPVC{}, + }, + }, + }, + }, + }, + } - assert.Assert(t, simpleMarshalContains(container.VolumeMounts, strings.TrimSpace(` - - mountPath: /etc/pgbackrest/conf.d - name: pgbackrest-config - readOnly: true - `)+"\n")) - }) + beforeAddInit := podTemplate.Spec.InitContainers + + MakePGBackrestLogDir(podTemplate, cluster) + + assert.Equal(t, len(beforeAddInit)+1, len(podTemplate.Spec.InitContainers)) + + var foundInitContainer bool + // verify init container command, image & name + for _, c := range podTemplate.Spec.InitContainers { + if c.Name == naming.ContainerPGBackRestLogDirInit { + // ignore "bash -c", should skip repo with no volume + assert.Equal(t, "mkdir -p /pgbackrest/repo2/log", c.Command[2]) + assert.Equal(t, c.Image, "test-image") + assert.Equal(t, c.ImagePullPolicy, corev1.PullAlways) + assert.Assert(t, !cmp.DeepEqual(c.SecurityContext, + &corev1.SecurityContext{})().Success()) + assert.Equal(t, c.Resources.Limits.Cpu().String(), "23m") + foundInitContainer = true + break + } + } + // verify init container is present + assert.Assert(t, foundInitContainer) +} - t.Run("check default config volume", func(t *testing.T) { +func TestReloadCommand(t *testing.T) { + shellcheck := require.ShellCheck(t) - JobConfigVolumeAndMount(&cmReturned, pod, "pgbackrest") + command := reloadCommand("some-name") - assert.Assert(t, simpleMarshalContains(pod.Volumes, strings.TrimSpace(` - - name: pgbackrest-config - projected: - sources: - - configMap: - items: - - key: pgbackrest_job.conf - path: /etc/pgbackrest/pgbackrest.conf - name: `+postgresCluster.GetName()+`-pgbackrest-config - `)+"\n")) - }) + // Expect a bash command with an inline script. + assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) + assert.Assert(t, len(command) > 3) - t.Run("check default config volume mount", func(t *testing.T) { + // Write out that inline script. + dir := t.TempDir() + file := filepath.Join(dir, "script.bash") + assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) - JobConfigVolumeAndMount(&cmReturned, pod, "pgbackrest") + // Expect shellcheck to be happy. 
+ cmd := exec.Command(shellcheck, "--enable=all", file) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) +} - container := findOrAppendContainer(&pod.Containers, "pgbackrest") +func TestReloadCommandPrettyYAML(t *testing.T) { + b, err := yaml.Marshal(reloadCommand("any")) + assert.NilError(t, err) + assert.Assert(t, strings.Contains(string(b), "\n- |"), + "expected literal block scalar, got:\n%s", b) +} - assert.Assert(t, simpleMarshalContains(container.VolumeMounts, strings.TrimSpace(` - - mountPath: /etc/pgbackrest/conf.d - name: pgbackrest-config - readOnly: true - `)+"\n")) - }) +func TestRestoreCommand(t *testing.T) { + shellcheck := require.ShellCheck(t) + + pgdata := "/pgdata/pg13" + opts := []string{ + "--stanza=" + DefaultStanzaName, "--pg1-path=" + pgdata, + "--repo=1"} + command := RestoreCommand(pgdata, "try", "", nil, strings.Join(opts, " ")) - t.Run("check repo config volume", func(t *testing.T) { + assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) + assert.Assert(t, len(command) > 3) - RepositoryConfigVolumeAndMount(&cmReturned, pod, "pgbackrest") + dir := t.TempDir() + file := filepath.Join(dir, "script.bash") + assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) - assert.Assert(t, simpleMarshalContains(&pod.Volumes, strings.TrimSpace(` - - name: pgbackrest-config - projected: - sources: - - configMap: - items: - - key: pgbackrest_repo.conf - path: /etc/pgbackrest/pgbackrest.conf - name: `+postgresCluster.GetName()+`-pgbackrest-config - `)+"\n")) - }) + cmd := exec.Command(shellcheck, "--enable=all", file) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) +} - t.Run("check repo config volume mount", func(t *testing.T) { +func TestRestoreCommandPrettyYAML(t *testing.T) { + b, err := yaml.Marshal(RestoreCommand("/dir", "try", "", nil, "--options")) - RepositoryConfigVolumeAndMount(&cmReturned, pod, "pgbackrest") + assert.NilError(t, err) + assert.Assert(t, strings.Contains(string(b), "\n- |"), + "expected literal block scalar, got:\n%s", b) +} - container := findOrAppendContainer(&pod.Containers, "pgbackrest") +func TestRestoreCommandTDE(t *testing.T) { + b, err := yaml.Marshal(RestoreCommand("/dir", "try", "echo testValue", nil, "--options")) - assert.Assert(t, simpleMarshalContains(container.VolumeMounts, strings.TrimSpace(` - - mountPath: /etc/pgbackrest/conf.d - name: pgbackrest-config - readOnly: true - `)+"\n")) - }) + assert.NilError(t, err) + assert.Assert(t, strings.Contains(string(b), "encryption_key_command = 'echo testValue'"), + "expected encryption_key_command setting, got:\n%s", b) } -func TestRestoreCommand(t *testing.T) { - shellcheck, err := exec.LookPath("shellcheck") - if err != nil { - t.Skip(`requires "shellcheck" executable`) - } else { - output, err := exec.Command(shellcheck, "--version").CombinedOutput() - assert.NilError(t, err) - t.Logf("using %q:\n%s", shellcheck, output) - } +func TestDedicatedSnapshotVolumeRestoreCommand(t *testing.T) { + shellcheck := require.ShellCheck(t) pgdata := "/pgdata/pg13" opts := []string{ "--stanza=" + DefaultStanzaName, "--pg1-path=" + pgdata, "--repo=1"} - command := RestoreCommand(pgdata, strings.Join(opts, " ")) + command := DedicatedSnapshotVolumeRestoreCommand(pgdata, strings.Join(opts, " ")) assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) assert.Assert(t, len(command) > 3) dir := t.TempDir() file := filepath.Join(dir, "script.bash") - assert.NilError(t, ioutil.WriteFile(file, 
[]byte(command[3]), 0o600)) + assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) cmd := exec.Command(shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } + +func TestDedicatedSnapshotVolumeRestoreCommandPrettyYAML(t *testing.T) { + b, err := yaml.Marshal(DedicatedSnapshotVolumeRestoreCommand("/dir", "--options")) + + assert.NilError(t, err) + assert.Assert(t, strings.Contains(string(b), "\n- |"), + "expected literal block scalar, got:\n%s", b) +} + +func TestServerConfig(t *testing.T) { + cluster := &v1beta1.PostgresCluster{} + cluster.UID = "shoe" + + assert.Equal(t, serverConfig(cluster).String(), ` +[global] +tls-server-address = 0.0.0.0 +tls-server-auth = pgbackrest@shoe=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n +`) +} + +func TestServerConfigIPv6(t *testing.T) { + cluster := &v1beta1.PostgresCluster{} + cluster.UID = "shoe" + cluster.Annotations = map[string]string{ + naming.PGBackRestIPVersion: "IPv6", + } + + assert.Equal(t, serverConfig(cluster).String(), ` +[global] +tls-server-address = :: +tls-server-auth = pgbackrest@shoe=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n +`) +} diff --git a/internal/pgbackrest/helpers.go b/internal/pgbackrest/helpers.go deleted file mode 100644 index 77099599f1..0000000000 --- a/internal/pgbackrest/helpers.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pgbackrest - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/util/sets" -) - -// findOrAppendContainer goes through a pod's container list and returns -// the container, if found, or appends the named container to the list -func findOrAppendContainer(containers *[]v1.Container, name string) *v1.Container { - for i := range *containers { - if (*containers)[i].Name == name { - return &(*containers)[i] - } - } - - *containers = append(*containers, v1.Container{Name: name}) - return &(*containers)[len(*containers)-1] -} - -// mergeVolumes adds the given volumes to a pod's existing volume -// list. If a volume with the same name already exists, the new -// volume replaces it. -func mergeVolumes(from []v1.Volume, vols ...v1.Volume) []v1.Volume { - names := sets.NewString() - for i := range vols { - names.Insert(vols[i].Name) - } - - // Partition original slice by whether or not the name was passed in. 
- var existing, others []v1.Volume - for i := range from { - if names.Has(from[i].Name) { - existing = append(existing, from[i]) - } else { - others = append(others, from[i]) - } - } - - // When the new vols don't match, replace them. - if !equality.Semantic.DeepEqual(existing, vols) { - return append(others, vols...) - } - - return from -} - -// mergeVolumeMounts adds the given volumes to a pod's existing volume mount -// list. If a volume mount with the same name already exists, the new -// volume mount replaces it. -func mergeVolumeMounts(from []v1.VolumeMount, mounts ...v1.VolumeMount) []v1.VolumeMount { - names := sets.NewString() - for i := range mounts { - names.Insert(mounts[i].Name) - } - - // Partition original slice by whether or not the name was passed in. - var existing, others []v1.VolumeMount - for i := range from { - if names.Has(from[i].Name) { - existing = append(existing, from[i]) - } else { - others = append(others, from[i]) - } - } - - // When the new mounts don't match, replace them. - if !equality.Semantic.DeepEqual(existing, mounts) { - return append(others, mounts...) - } - - return from -} diff --git a/internal/pgbackrest/helpers_test.go b/internal/pgbackrest/helpers_test.go deleted file mode 100644 index aa2f8aa899..0000000000 --- a/internal/pgbackrest/helpers_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pgbackrest - -import ( - "path/filepath" - "testing" - - v1 "k8s.io/api/core/v1" - - // Google Kubernetes Engine / Google Cloud Platform authentication provider - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/yaml" - - "github.com/crunchydata/postgres-operator/internal/controller/runtime" -) - -// Testing namespace and postgrescluster name -const ( - testclustername = "testcluster" - testFieldOwner = "pgbackrestConfigTestFieldOwner" -) - -// getCMData returns the 'Data' content from the specified configmap -func getCMData(cm v1.ConfigMap, key string) string { - - return cm.Data[key] -} - -// simpleMarshalContains takes in a YAML object and checks whether -// it includes the expected string -func simpleMarshalContains(actual interface{}, expected string) bool { - b, err := yaml.Marshal(actual) - - if err != nil { - return false - } - - if string(b) == expected { - return true - } - return false -} - -// setupTestEnv configures and starts an EnvTest instance of etcd and the Kubernetes API server -// for test usage, as well as creates a new client instance. 
-func setupTestEnv(t *testing.T) (*envtest.Environment, client.Client) { - - testEnv := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, - } - cfg, err := testEnv.Start() - if err != nil { - t.Fatal(err) - } - t.Log("Test environment started") - - pgoScheme, err := runtime.CreatePostgresOperatorScheme() - if err != nil { - t.Fatal(err) - } - client, err := client.New(cfg, client.Options{Scheme: pgoScheme}) - if err != nil { - t.Fatal(err) - } - - return testEnv, client -} - -// teardownTestEnv stops the test environment when the tests -// have completed -func teardownTestEnv(t *testing.T, testEnv *envtest.Environment) { - if err := testEnv.Stop(); err != nil { - t.Error(err) - } - t.Log("Test environment stopped") -} diff --git a/internal/pgbackrest/iana.go b/internal/pgbackrest/iana.go new file mode 100644 index 0000000000..c6e2f71e6c --- /dev/null +++ b/internal/pgbackrest/iana.go @@ -0,0 +1,16 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgbackrest + +// The protocol used by pgBackRest is registered with the Internet Assigned +// Numbers Authority (IANA). +// - https://www.iana.org/assignments/service-names-port-numbers +const ( + // IANAPortNumber is the port assigned to pgBackRest at the IANA. + IANAPortNumber = 8432 + + // IANAServiceName is the name of the pgBackRest protocol at the IANA. + IANAServiceName = "pgbackrest" +) diff --git a/internal/pgbackrest/options.go b/internal/pgbackrest/options.go new file mode 100644 index 0000000000..2439901e47 --- /dev/null +++ b/internal/pgbackrest/options.go @@ -0,0 +1,81 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgbackrest + +import ( + "fmt" + "sort" + "strings" +) + +// iniMultiSet represents the key-value pairs in a pgBackRest config file section. +type iniMultiSet map[string][]string + +func (ms iniMultiSet) String() string { + keys := make([]string, 0, len(ms)) + for k := range ms { + keys = append(keys, k) + } + + sort.Strings(keys) + + var b strings.Builder + for _, k := range keys { + for _, v := range ms[k] { + if len(v) <= 0 { + _, _ = fmt.Fprintf(&b, "%s =\n", k) + } else { + _, _ = fmt.Fprintf(&b, "%s = %s\n", k, v) + } + } + } + return b.String() +} + +// Add associates value with key, appending it to any values already associated +// with key. The key is case-sensitive. +func (ms iniMultiSet) Add(key, value string) { + ms[key] = append(ms[key], value) +} + +// Set replaces the values associated with key. The key is case-sensitive. +func (ms iniMultiSet) Set(key string, values ...string) { + ms[key] = make([]string, len(values)) + copy(ms[key], values) +} + +// Values returns all values associated with the given key. +// The key is case-sensitive. The returned slice is not a copy. +func (ms iniMultiSet) Values(key string) []string { + return ms[key] +} + +// iniSectionSet represents the different sections in a pgBackRest config file. 
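// For example (an illustrative sketch mirroring the String method below), global
// sections render first, followed by stanza sections, each group in alphabetical
// order:
//
//	iniSectionSet{
//		"global": iniMultiSet{"log-path": {"/tmp"}},
//		"db":     iniMultiSet{"pg1-port": {"5432"}},
//	}.String()
//
// returns "\n[global]\nlog-path = /tmp\n\n[db]\npg1-port = 5432\n".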
+type iniSectionSet map[string]iniMultiSet + +func (sections iniSectionSet) String() string { + global := make([]string, 0, len(sections)) + stanza := make([]string, 0, len(sections)) + + for k := range sections { + if k == "global" || strings.HasPrefix(k, "global:") { + global = append(global, k) + } else { + stanza = append(stanza, k) + } + } + + sort.Strings(global) + sort.Strings(stanza) + + var b strings.Builder + for _, k := range global { + _, _ = fmt.Fprintf(&b, "\n[%s]\n%s", k, sections[k]) + } + for _, k := range stanza { + _, _ = fmt.Fprintf(&b, "\n[%s]\n%s", k, sections[k]) + } + return b.String() +} diff --git a/internal/pgbackrest/options_test.go b/internal/pgbackrest/options_test.go new file mode 100644 index 0000000000..374737ec7f --- /dev/null +++ b/internal/pgbackrest/options_test.go @@ -0,0 +1,100 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgbackrest + +import ( + "strings" + "testing" + + "gotest.tools/v3/assert" + "sigs.k8s.io/yaml" +) + +func TestMultiSet(t *testing.T) { + t.Parallel() + + ms := iniMultiSet{} + assert.Equal(t, ms.String(), "") + assert.DeepEqual(t, ms.Values("any"), []string(nil)) + + ms.Add("x", "y") + assert.DeepEqual(t, ms.Values("x"), []string{"y"}) + + ms.Add("x", "a") + assert.DeepEqual(t, ms.Values("x"), []string{"y", "a"}) + + ms.Add("abc", "j'l") + assert.DeepEqual(t, ms, iniMultiSet{ + "x": []string{"y", "a"}, + "abc": []string{"j'l"}, + }) + assert.Equal(t, ms.String(), + "abc = j'l\nx = y\nx = a\n") + + ms.Set("x", "n") + assert.DeepEqual(t, ms.Values("x"), []string{"n"}) + assert.Equal(t, ms.String(), + "abc = j'l\nx = n\n") + + ms.Set("x", "p", "q") + assert.DeepEqual(t, ms.Values("x"), []string{"p", "q"}) + + t.Run("PrettyYAML", func(t *testing.T) { + b, err := yaml.Marshal(iniMultiSet{ + "x": []string{"y"}, + "z": []string{""}, + }.String()) + + assert.NilError(t, err) + assert.Assert(t, strings.HasPrefix(string(b), `|`), + "expected literal block scalar, got:\n%s", b) + }) +} + +func TestSectionSet(t *testing.T) { + t.Parallel() + + sections := iniSectionSet{} + assert.Equal(t, sections.String(), "") + + sections["db"] = iniMultiSet{"x": []string{"y"}} + assert.Equal(t, sections.String(), + "\n[db]\nx = y\n") + + sections["db:backup"] = iniMultiSet{"x": []string{"w"}} + assert.Equal(t, sections.String(), + "\n[db]\nx = y\n\n[db:backup]\nx = w\n", + "expected subcommand after its stanza") + + sections["another"] = iniMultiSet{"x": []string{"z"}} + assert.Equal(t, sections.String(), + "\n[another]\nx = z\n\n[db]\nx = y\n\n[db:backup]\nx = w\n", + "expected alphabetical stanzas") + + sections["global"] = iniMultiSet{"x": []string{"t"}} + assert.Equal(t, sections.String(), + "\n[global]\nx = t\n\n[another]\nx = z\n\n[db]\nx = y\n\n[db:backup]\nx = w\n", + "expected global before stanzas") + + sections["global:command"] = iniMultiSet{"t": []string{"v"}} + assert.Equal(t, sections.String(), + strings.Join([]string{ + "\n[global]\nx = t\n", + "\n[global:command]\nt = v\n", + "\n[another]\nx = z\n", + "\n[db]\nx = y\n", + "\n[db:backup]\nx = w\n", + }, ""), + "expected global subcommand after global") + + t.Run("PrettyYAML", func(t *testing.T) { + sections["last"] = iniMultiSet{"z": []string{""}} + b, err := yaml.Marshal(sections.String()) + + assert.NilError(t, err) + assert.Assert(t, strings.HasPrefix(string(b), `|`), + "expected literal block scalar, got:\n%s", b) + }) +} diff --git a/internal/pgbackrest/pgbackrest.go b/internal/pgbackrest/pgbackrest.go index 
c42551a2c5..21124b9744 100644 --- a/internal/pgbackrest/pgbackrest.go +++ b/internal/pgbackrest/pgbackrest.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -22,12 +11,18 @@ import ( "io" "github.com/pkg/errors" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) const ( // errMsgConfigHashMismatch is the error message displayed when a configuration hash mismatch // is detected while attempting stanza creation errMsgConfigHashMismatch = "postgres operator error: pgBackRest config hash mismatch" + + // errMsgStaleReposWithVolumesConfig is the error message displayed when a volume-backed repo has been + // configured, but the configuration has not yet propagated into the container. + errMsgStaleReposWithVolumesConfig = "postgres operator error: pgBackRest stale volume-backed repo configuration" ) // Executor calls "pgbackrest" commands @@ -35,38 +30,79 @@ type Executor func( ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error -// StanzaCreate runs the pgBackRest "stanza-create" command. If the bool returned from this -// function is false, this indicates that a pgBackRest config hash mismatch was identified that -// prevented the "pgbackrest stanza-create" command from running (with a config has mitmatch -// indicating that pgBackRest configuration as stored in the cluster's pgBackRest ConfigMap has -// not yet propagated to the Pod). -func (exec Executor) StanzaCreate(ctx context.Context, configHash string) (bool, error) { +// StanzaCreateOrUpgrade runs either the pgBackRest "stanza-create" or "stanza-upgrade" command +// depending on the boolean value of the "upgrade" function parameter. This function is invoked +// by the "reconcileStanzaCreate" function with "upgrade" set to false; if the stanza already +// exists but the PG version has changed, pgBackRest will error with the "errMsgBackupDbMismatch" +// error. If that occurs, we then rerun the command with "upgrade" set to true. +// - https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/check/common.c#L154-L156 +// If the bool returned from this function is true, this indicates that a pgBackRest config hash +// mismatch was identified that prevented the pgBackRest stanza-create or stanza-upgrade command +// from running (with a config mismatch indicating that the pgBackRest configuration as stored in +// the cluster's pgBackRest ConfigMap has not yet propagated to the Pod). 
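// As an illustrative sketch (assuming two volume-backed repos named "repo1" and
// "repo2" in the spec), the propagation check composed below is equivalent to:
//
//	grep repo1-path /etc/pgbackrest/conf.d/pgbackrest_instance.conf \
//	  && grep repo2-path /etc/pgbackrest/conf.d/pgbackrest_instance.conf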
+func (exec Executor) StanzaCreateOrUpgrade(ctx context.Context, configHash string, + postgresCluster *v1beta1.PostgresCluster) (bool, error) { var stdout, stderr bytes.Buffer + var reposWithVolumes []v1beta1.PGBackRestRepo + for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { + if repo.Volume != nil { + reposWithVolumes = append(reposWithVolumes, repo) + } + } + + grep := "grep %s-path /etc/pgbackrest/conf.d/pgbackrest_instance.conf" + + var checkRepoCmd string + if len(reposWithVolumes) > 0 { + repo := reposWithVolumes[0] + checkRepoCmd = checkRepoCmd + fmt.Sprintf(grep, repo.Name) + + reposWithVolumes = reposWithVolumes[1:] + for _, repo := range reposWithVolumes { + checkRepoCmd = checkRepoCmd + fmt.Sprintf(" && "+grep, repo.Name) + } + } + // this is the script that is run to create a stanza. First it checks the // "config-hash" file to ensure all configuration changes (e.g. from ConfigMaps) have - // propagated to the container, and if so then runs the "stanza-create" command (and if - // not, it prints an error and returns with exit code 1). + // propagated to the container, and if not, it prints an error and returns with exit code 1). + // Next, it checks that any volume-backed repo added to the config has propagated into + // the container, and if not, prints an error and exits with code 1. + // Otherwise, it runs the pgbackrest command, which will either be "stanza-create" or + // "stanza-upgrade", depending on the value of the boolean "upgrade" parameter. const script = ` -declare -r hash="$1" stanza="$2" message="$3" +declare -r hash="$1" stanza="$2" hash_msg="$3" vol_msg="$4" check_repo_cmd="$5" if [[ "$(< /etc/pgbackrest/conf.d/config-hash)" != "${hash}" ]]; then - printf >&2 "%s" "${message}"; exit 1; + printf >&2 "%s" "${hash_msg}"; exit 1; +elif ! bash -c "${check_repo_cmd}"; then + printf >&2 "%s" "${vol_msg}"; exit 1; else - pgbackrest stanza-create --stanza="${stanza}" + pgbackrest stanza-create --stanza="${stanza}" || pgbackrest stanza-upgrade --stanza="${stanza}" fi ` if err := exec(ctx, nil, &stdout, &stderr, "bash", "-ceu", "--", - script, "-", configHash, DefaultStanzaName, errMsgConfigHashMismatch); err != nil { + script, "-", configHash, DefaultStanzaName, errMsgConfigHashMismatch, errMsgStaleReposWithVolumesConfig, + checkRepoCmd); err != nil { + + errReturn := stderr.String() // if the config hashes didn't match, return true and don't return an error since this is // expected while waiting for config changes in ConfigMaps and Secrets to make it to the // container - if stderr.String() == errMsgConfigHashMismatch { + if errReturn == errMsgConfigHashMismatch { + return true, nil + } + + // if the configuration for volume-backed repositories is stale, return true and don't return an error since this + // is expected while waiting for config changes in ConfigMaps to make it to the container + if errReturn == errMsgStaleReposWithVolumesConfig { return true, nil } - return false, errors.WithStack(fmt.Errorf("%w: %v", err, stderr.String())) + // if none of the above errors, return the err + return false, errors.WithStack(fmt.Errorf("%w: %v", err, errReturn)) } return false, nil diff --git a/internal/pgbackrest/pgbackrest_test.go b/internal/pgbackrest/pgbackrest_test.go index e5d16d1fbb..33c97913cf 100644 --- a/internal/pgbackrest/pgbackrest_test.go +++ b/internal/pgbackrest/pgbackrest_test.go @@ -1,53 +1,46 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest import ( "context" "io" - "io/ioutil" + "os" "os/exec" "path/filepath" "testing" "gotest.tools/v3/assert" -) + "k8s.io/apimachinery/pkg/api/resource" -func TestStanzaCreate(t *testing.T) { + corev1 "k8s.io/api/core/v1" - shellcheck, err := exec.LookPath("shellcheck") - if err != nil { - t.Skip(`requires "shellcheck" executable`) - } else { - output, err := exec.Command(shellcheck, "--version").CombinedOutput() - assert.NilError(t, err) - t.Logf("using %q:\n%s", shellcheck, output) - } + "github.com/crunchydata/postgres-operator/internal/testing/require" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestStanzaCreateOrUpgrade(t *testing.T) { + shellcheck := require.ShellCheck(t) ctx := context.Background() configHash := "7f5d4d5bdc" expectedCommand := []string{"bash", "-ceu", "--", ` -declare -r hash="$1" stanza="$2" message="$3" +declare -r hash="$1" stanza="$2" hash_msg="$3" vol_msg="$4" check_repo_cmd="$5" if [[ "$(< /etc/pgbackrest/conf.d/config-hash)" != "${hash}" ]]; then - printf >&2 "%s" "${message}"; exit 1; + printf >&2 "%s" "${hash_msg}"; exit 1; +elif ! bash -c "${check_repo_cmd}"; then + printf >&2 "%s" "${vol_msg}"; exit 1; else - pgbackrest stanza-create --stanza="${stanza}" + pgbackrest stanza-create --stanza="${stanza}" || pgbackrest stanza-upgrade --stanza="${stanza}" fi `, - "-", "7f5d4d5bdc", "db", "postgres operator error: pgBackRest config hash mismatch"} + "-", "7f5d4d5bdc", "db", "postgres operator error: pgBackRest config hash mismatch", + "postgres operator error: pgBackRest stale volume-backed repo configuration", + "grep repo1-path /etc/pgbackrest/conf.d/pgbackrest_instance.conf", + } var shellCheckScript string stanzaExec := func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, @@ -61,8 +54,36 @@ fi return nil } + postgresCluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Repos: []v1beta1.PGBackRestRepo{{ + Name: "repo1", + Volume: &v1beta1.RepoPVC{ + VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, { + Name: "repo2", + S3: &v1beta1.RepoS3{ + Bucket: "bucket", + Endpoint: "endpoint", + Region: "region", + }, + }}, + }, + }, + }, + } - configHashMismatch, err := Executor(stanzaExec).StanzaCreate(ctx, configHash) + configHashMismatch, err := Executor(stanzaExec).StanzaCreateOrUpgrade(ctx, configHash, postgresCluster) assert.NilError(t, err) assert.Assert(t, !configHashMismatch) @@ -70,7 +91,7 @@ fi // Write out that inline script. 
dir := t.TempDir() file := filepath.Join(dir, "script.bash") - assert.NilError(t, ioutil.WriteFile(file, []byte(shellCheckScript), 0o600)) + assert.NilError(t, os.WriteFile(file, []byte(shellCheckScript), 0o600)) // Expect shellcheck to be happy. cmd := exec.Command(shellcheck, "--enable=all", file) diff --git a/internal/pgbackrest/postgres.go b/internal/pgbackrest/postgres.go index 1e56913179..ab5c71868c 100644 --- a/internal/pgbackrest/postgres.go +++ b/internal/pgbackrest/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -26,18 +15,43 @@ import ( func PostgreSQL( inCluster *v1beta1.PostgresCluster, outParameters *postgres.Parameters, + backupsEnabled bool, ) { if outParameters.Mandatory == nil { outParameters.Mandatory = postgres.NewParameterSet() } + if outParameters.Default == nil { + outParameters.Default = postgres.NewParameterSet() + } // Send WAL files to all configured repositories when not in recovery. // - https://pgbackrest.org/user-guide.html#quickstart/configure-archiving // - https://pgbackrest.org/command.html#command-archive-push // - https://www.postgresql.org/docs/current/runtime-config-wal.html - archive := `pgbackrest --stanza=` + DefaultStanzaName + ` archive-push "%p"` outParameters.Mandatory.Add("archive_mode", "on") - outParameters.Mandatory.Add("archive_command", archive) + if backupsEnabled { + archive := `pgbackrest --stanza=` + DefaultStanzaName + ` archive-push "%p"` + outParameters.Mandatory.Add("archive_command", archive) + } else { + // If backups are disabled, keep archive_mode on (to avoid a Postgres restart) + // and throw away WAL. + outParameters.Mandatory.Add("archive_command", `true`) + } + + // archive_timeout is used to determine at what point a WAL file is switched, + // if the WAL archive has not reached its full size in # of transactions + // (16MB). This has ramifications for log shipping, i.e. it ensures a WAL file + // is shipped to an archive every X seconds to help reduce the risk of data + // loss in a disaster recovery scenario. For standby servers that are not + // connected using streaming replication, this also ensures that new data is + // available at least once a minute. + // + // PostgreSQL documentation considers an archive_timeout of 60 seconds to be + // reasonable. There are cases where you may want to set archive_timeout to 0, + // for example, when the remote archive (pgBackRest repo) is unavailable; this + // is to prevent WAL accumulation on your primary. + // - https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-ARCHIVE-TIMEOUT + outParameters.Default.Add("archive_timeout", "60s") // Fetch WAL files from any configured repository during recovery. 
// - https://pgbackrest.org/command.html#command-archive-get @@ -45,7 +59,7 @@ func PostgreSQL( restore := `pgbackrest --stanza=` + DefaultStanzaName + ` archive-get %f "%p"` outParameters.Mandatory.Add("restore_command", restore) - if inCluster.Spec.Standby != nil && inCluster.Spec.Standby.Enabled { + if inCluster.Spec.Standby != nil && inCluster.Spec.Standby.Enabled && inCluster.Spec.Standby.RepoName != "" { // Fetch WAL files from the designated repository. The repository name // is validated by the Kubernetes API, so it does not need to be quoted diff --git a/internal/pgbackrest/postgres_test.go b/internal/pgbackrest/postgres_test.go index b3641575e0..b87b35631a 100644 --- a/internal/pgbackrest/postgres_test.go +++ b/internal/pgbackrest/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -28,19 +17,30 @@ func TestPostgreSQLParameters(t *testing.T) { cluster := new(v1beta1.PostgresCluster) parameters := new(postgres.Parameters) - PostgreSQL(cluster, parameters) + PostgreSQL(cluster, parameters, true) assert.DeepEqual(t, parameters.Mandatory.AsMap(), map[string]string{ "archive_mode": "on", "archive_command": `pgbackrest --stanza=db archive-push "%p"`, "restore_command": `pgbackrest --stanza=db archive-get %f "%p"`, }) + assert.DeepEqual(t, parameters.Default.AsMap(), map[string]string{ + "archive_timeout": "60s", + }) + + PostgreSQL(cluster, parameters, false) + assert.DeepEqual(t, parameters.Mandatory.AsMap(), map[string]string{ + "archive_mode": "on", + "archive_command": "true", + "restore_command": `pgbackrest --stanza=db archive-get %f "%p"`, + }) + cluster.Spec.Standby = &v1beta1.PostgresStandbySpec{ Enabled: true, RepoName: "repo99", } - PostgreSQL(cluster, parameters) + PostgreSQL(cluster, parameters, true) assert.DeepEqual(t, parameters.Mandatory.AsMap(), map[string]string{ "archive_mode": "on", "archive_command": `pgbackrest --stanza=db archive-push "%p"`, diff --git a/internal/pgbackrest/rbac.go b/internal/pgbackrest/rbac.go index d12de69046..950f10ef8b 100644 --- a/internal/pgbackrest/rbac.go +++ b/internal/pgbackrest/rbac.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -22,8 +11,8 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// +kubebuilder:rbac:namespace=pgbackrest,groups="",resources=pods,verbs=list -// +kubebuilder:rbac:namespace=pgbackrest,groups="",resources=pods/exec,verbs=create +// +kubebuilder:rbac:groups="",resources="pods",verbs={list} +// +kubebuilder:rbac:groups="",resources="pods/exec",verbs={create} // Permissions returns the RBAC rules pgBackRest needs for a cluster. func Permissions(cluster *v1beta1.PostgresCluster) []rbacv1.PolicyRule { diff --git a/internal/pgbackrest/rbac_test.go b/internal/pgbackrest/rbac_test.go index db247bc82b..a620276f64 100644 --- a/internal/pgbackrest/rbac_test.go +++ b/internal/pgbackrest/rbac_test.go @@ -1,26 +1,16 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest import ( - "strings" "testing" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" "gotest.tools/v3/assert" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func isUniqueAndSorted(slice []string) bool { @@ -47,7 +37,7 @@ func TestPermissions(t *testing.T) { assert.Assert(t, isUniqueAndSorted(rule.Verbs), "got %q", rule.Verbs) } - assert.Assert(t, marshalEquals(permissions, strings.Trim(` + assert.Assert(t, cmp.MarshalMatches(permissions, ` - apiGroups: - "" resources: @@ -60,5 +50,5 @@ func TestPermissions(t *testing.T) { - pods/exec verbs: - create - `, "\t\n")+"\n")) + `)) } diff --git a/internal/pgbackrest/reconcile.go b/internal/pgbackrest/reconcile.go index 78d590c93d..d22bccc3c0 100644 --- a/internal/pgbackrest/reconcile.go +++ b/internal/pgbackrest/reconcile.go @@ -1,38 +1,29 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest import ( + "context" "strings" "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) // AddRepoVolumesToPod adds pgBackRest repository volumes to the provided Pod template spec, while // also adding associated volume mounts to the containers specified. -func AddRepoVolumesToPod(postgresCluster *v1beta1.PostgresCluster, template *v1.PodTemplateSpec, +func AddRepoVolumesToPod(postgresCluster *v1beta1.PostgresCluster, template *corev1.PodTemplateSpec, repoPVCNames map[string]string, containerNames ...string) error { for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { @@ -52,12 +43,31 @@ func AddRepoVolumesToPod(postgresCluster *v1beta1.PostgresCluster, template *v1. } template.Spec.Volumes = append(template.Spec.Volumes, corev1.Volume{ Name: repo.Name, - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: repoVolName}, }, }) + var initContainerFound bool + var index int + for index = range template.Spec.InitContainers { + if template.Spec.InitContainers[index].Name == naming.ContainerPGBackRestLogDirInit { + initContainerFound = true + break + } + } + if !initContainerFound { + return errors.Errorf( + "Unable to find init container %q when adding pgBackRest repo volumes", + naming.ContainerPGBackRestLogDirInit) + } + template.Spec.InitContainers[index].VolumeMounts = + append(template.Spec.InitContainers[index].VolumeMounts, corev1.VolumeMount{ + Name: repo.Name, + MountPath: "/pgbackrest/" + repo.Name, + }) + for _, name := range containerNames { var containerFound bool var index int @@ -72,7 +82,7 @@ func AddRepoVolumesToPod(postgresCluster *v1beta1.PostgresCluster, template *v1. name) } template.Spec.Containers[index].VolumeMounts = - append(template.Spec.Containers[index].VolumeMounts, v1.VolumeMount{ + append(template.Spec.Containers[index].VolumeMounts, corev1.VolumeMount{ Name: repo.Name, MountPath: "/pgbackrest/" + repo.Name, }) @@ -82,163 +92,330 @@ func AddRepoVolumesToPod(postgresCluster *v1beta1.PostgresCluster, template *v1. return nil } -// AddConfigsToPod populates a Pod template Spec with with pgBackRest configuration volumes while -// then mounting that configuration to the specified containers. 
-func AddConfigsToPod(postgresCluster *v1beta1.PostgresCluster, template *v1.PodTemplateSpec, - configName string, containerNames ...string) error { - - // grab user provided configs - pgBackRestConfigs := postgresCluster.Spec.Backups.PGBackRest.Configuration - // add default pgbackrest configs - defaultConfig := v1.VolumeProjection{ - ConfigMap: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{ - Name: naming.PGBackRestConfig(postgresCluster).Name, - }, - Items: []v1.KeyToPath{ - {Key: configName, Path: configName}, - {Key: ConfigHashKey, Path: ConfigHashKey}, - }, - }, +// AddConfigToInstancePod adds and mounts the pgBackRest configuration volumes +// for an instance of cluster to pod. The database container and any pgBackRest +// containers must already be in pod. +func AddConfigToInstancePod( + cluster *v1beta1.PostgresCluster, pod *corev1.PodSpec, +) { + configmap := corev1.VolumeProjection{ConfigMap: &corev1.ConfigMapProjection{}} + configmap.ConfigMap.Name = naming.PGBackRestConfig(cluster).Name + configmap.ConfigMap.Items = []corev1.KeyToPath{ + {Key: CMInstanceKey, Path: CMInstanceKey}, + {Key: ConfigHashKey, Path: ConfigHashKey}, } - pgBackRestConfigs = append(pgBackRestConfigs, defaultConfig) - template.Spec.Volumes = append(template.Spec.Volumes, v1.Volume{ - Name: ConfigVol, - VolumeSource: v1.VolumeSource{ - Projected: &v1.ProjectedVolumeSource{ - Sources: pgBackRestConfigs, - }, - }, - }) + secret := corev1.VolumeProjection{Secret: &corev1.SecretProjection{}} + secret.Secret.Name = naming.PGBackRestSecret(cluster).Name - for _, name := range containerNames { - var containerFound bool - var index int - for index = range template.Spec.Containers { - if template.Spec.Containers[index].Name == name { - containerFound = true - break - } + configmap.ConfigMap.Items = append( + configmap.ConfigMap.Items, corev1.KeyToPath{ + Key: serverConfigMapKey, + Path: serverConfigProjectionPath, + }) + secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) + + // Start with a copy of projections specified in the cluster. Items later in + // the list take precedence over earlier items (that is, last write wins). + // - https://kubernetes.io/docs/concepts/storage/volumes/#projected + sources := append([]corev1.VolumeProjection{}, + cluster.Spec.Backups.PGBackRest.Configuration...) + + if len(secret.Secret.Items) > 0 { + sources = append(sources, configmap, secret) + } else { + sources = append(sources, configmap) + } + + addConfigVolumeAndMounts(pod, sources) +} + +// AddConfigToRepoPod adds and mounts the pgBackRest configuration volume for +// the dedicated repository host of cluster to pod. The pgBackRest containers +// must already be in pod. +func AddConfigToRepoPod( + cluster *v1beta1.PostgresCluster, pod *corev1.PodSpec, +) { + configmap := corev1.VolumeProjection{ConfigMap: &corev1.ConfigMapProjection{}} + configmap.ConfigMap.Name = naming.PGBackRestConfig(cluster).Name + configmap.ConfigMap.Items = []corev1.KeyToPath{ + {Key: CMRepoKey, Path: CMRepoKey}, + {Key: ConfigHashKey, Path: ConfigHashKey}, + {Key: serverConfigMapKey, Path: serverConfigProjectionPath}, + } + + secret := corev1.VolumeProjection{Secret: &corev1.SecretProjection{}} + secret.Secret.Name = naming.PGBackRestSecret(cluster).Name + secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) + + // Start with a copy of projections specified in the cluster. Items later in + // the list take precedence over earlier items (that is, last write wins). 
+ // - https://kubernetes.io/docs/concepts/storage/volumes/#projected + sources := append([]corev1.VolumeProjection{}, + cluster.Spec.Backups.PGBackRest.Configuration...) + + addConfigVolumeAndMounts(pod, append(sources, configmap, secret)) +} + +// AddConfigToRestorePod adds and mounts the pgBackRest configuration volume +// for the restore job of cluster to pod. The pgBackRest containers must +// already be in pod. +func AddConfigToRestorePod( + cluster *v1beta1.PostgresCluster, sourceCluster *v1beta1.PostgresCluster, pod *corev1.PodSpec, +) { + configmap := corev1.VolumeProjection{ConfigMap: &corev1.ConfigMapProjection{}} + configmap.ConfigMap.Name = naming.PGBackRestConfig(cluster).Name + configmap.ConfigMap.Items = []corev1.KeyToPath{ + // TODO(cbandy): This may be the instance configuration of a cluster + // different from the one we are building/creating. For now the + // stanza options are "pg1-path", "pg1-port", and "pg1-socket-path" + // and these are safe enough to use across different clusters running + // the same PostgreSQL version. When that list grows, consider changing + // this to use local stanza options and remote repository options. + // See also [RestoreConfig]. + {Key: CMInstanceKey, Path: CMInstanceKey}, + } + + // Mount client certificates of the source cluster if they exist. + secret := corev1.VolumeProjection{Secret: &corev1.SecretProjection{}} + secret.Secret.Name = naming.PGBackRestSecret(cluster).Name + secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) + secret.Secret.Optional = initialize.Bool(true) + + // Start with a copy of projections specified in the cluster. Items later in + // the list take precedence over earlier items (that is, last write wins). + // - https://kubernetes.io/docs/concepts/storage/volumes/#projected + sources := append([]corev1.VolumeProjection{}, + cluster.Spec.Backups.PGBackRest.Configuration...) + + // For a PostgresCluster restore, append all pgBackRest configuration from + // the source cluster for the restore. + if sourceCluster != nil { + sources = append(sources, sourceCluster.Spec.Backups.PGBackRest.Configuration...) + } + + // Currently the spec accepts a dataSource with both a PostgresCluster and + // a PGBackRest section. In that case only the PostgresCluster is honored (see + // internal/controller/postgrescluster/cluster.go, reconcileDataSource). + // + // `sourceCluster` is always nil for a cloud based restore (see + // internal/controller/postgrescluster/pgbackrest.go, reconcileCloudBasedDataSource). + // + // So, if `sourceCluster` is nil and `DataSource.PGBackRest` is not, + // this is a cloud based datasource restore and only the configuration from + // `dataSource.pgbackrest` section should be included. + if sourceCluster == nil && + cluster.Spec.DataSource != nil && + cluster.Spec.DataSource.PGBackRest != nil { + + sources = append([]corev1.VolumeProjection{}, + cluster.Spec.DataSource.PGBackRest.Configuration...) 
+ } + + // mount any provided configuration files to the restore Job Pod + if len(cluster.Spec.Config.Files) != 0 { + additionalConfigVolumeMount := postgres.AdditionalConfigVolumeMount() + additionalConfigVolume := corev1.Volume{Name: additionalConfigVolumeMount.Name} + additionalConfigVolume.Projected = &corev1.ProjectedVolumeSource{ + Sources: append(sources, cluster.Spec.Config.Files...), } - if !containerFound { - return errors.Errorf("Unable to find container %q when adding pgBackRest configration", - name) + for i := range pod.Containers { + container := &pod.Containers[i] + + if container.Name == naming.PGBackRestRestoreContainerName { + container.VolumeMounts = append(container.VolumeMounts, additionalConfigVolumeMount) + } } - template.Spec.Containers[index].VolumeMounts = - append(template.Spec.Containers[index].VolumeMounts, - v1.VolumeMount{ - Name: ConfigVol, - MountPath: ConfigDir, - }) + pod.Volumes = append(pod.Volumes, additionalConfigVolume) } - return nil + addConfigVolumeAndMounts(pod, append(sources, configmap, secret)) } -// AddSSHToPod populates a Pod template Spec with with the container and volumes needed to enable -// SSH within a Pod. It will also mount the SSH configuration to any additional containers specified. -func AddSSHToPod(postgresCluster *v1beta1.PostgresCluster, template *v1.PodTemplateSpec, - enableSSHD bool, resources v1.ResourceRequirements, - additionalVolumeMountContainers ...string) error { - - sshConfigs := []v1.VolumeProjection{} - // stores all SSH configurations (ConfigMaps & Secrets) - if postgresCluster.Spec.Backups.PGBackRest.RepoHost == nil || - postgresCluster.Spec.Backups.PGBackRest.RepoHost.SSHConfiguration == nil { - sshConfigs = append(sshConfigs, v1.VolumeProjection{ - ConfigMap: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{ - Name: naming.PGBackRestSSHConfig(postgresCluster).Name, - }, - }, - }) - } else { - sshConfigs = append(sshConfigs, v1.VolumeProjection{ - ConfigMap: postgresCluster.Spec.Backups.PGBackRest.RepoHost.SSHConfiguration, - }) - } - if postgresCluster.Spec.Backups.PGBackRest.RepoHost == nil || - postgresCluster.Spec.Backups.PGBackRest.RepoHost.SSHSecret == nil { - sshConfigs = append(sshConfigs, v1.VolumeProjection{ - Secret: &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{ - Name: naming.PGBackRestSSHSecret(postgresCluster).Name, - }, - }, - }) - } else { - sshConfigs = append(sshConfigs, v1.VolumeProjection{ - Secret: postgresCluster.Spec.Backups.PGBackRest.RepoHost.SSHSecret, - }) +// addConfigVolumeAndMounts adds the config projections to pod as the +// configuration volume. It mounts that volume to the database container and +// all pgBackRest containers in pod. 
+func addConfigVolumeAndMounts( + pod *corev1.PodSpec, config []corev1.VolumeProjection, +) { + configVolumeMount := corev1.VolumeMount{ + Name: "pgbackrest-config", + MountPath: configDirectory, + ReadOnly: true, } - template.Spec.Volumes = append(template.Spec.Volumes, v1.Volume{ - Name: naming.PGBackRestSSHVolume, - VolumeSource: v1.VolumeSource{ - Projected: &v1.ProjectedVolumeSource{ - Sources: sshConfigs, - DefaultMode: initialize.Int32(0o040), - }, + + configVolume := corev1.Volume{ + Name: configVolumeMount.Name, + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{Sources: config}, }, - }) + } + + for i := range pod.Containers { + container := &pod.Containers[i] - sshVolumeMount := v1.VolumeMount{ - Name: naming.PGBackRestSSHVolume, - MountPath: sshConfigPath, + switch container.Name { + case + naming.ContainerDatabase, + naming.ContainerPGBackRestConfig, + naming.PGBackRestRepoContainerName, + naming.PGBackRestRestoreContainerName: + + container.VolumeMounts = append(container.VolumeMounts, configVolumeMount) + } + } + + pod.Volumes = append(pod.Volumes, configVolume) +} + +// addServerContainerAndVolume adds the TLS server container and certificate +// projections to pod. Any PostgreSQL data and WAL volumes in pod are also mounted. +func addServerContainerAndVolume( + ctx context.Context, + cluster *v1beta1.PostgresCluster, pod *corev1.PodSpec, + certificates []corev1.VolumeProjection, resources *corev1.ResourceRequirements, +) { + serverVolumeMount := corev1.VolumeMount{ + Name: "pgbackrest-server", + MountPath: serverMountPath, ReadOnly: true, } - // Only add the SSHD container if requested. Sometimes (e.g. when running a restore Job) it is - // not necessary to run a full SSHD server, but the various SSH configs are still needed. - if enableSSHD { - container := v1.Container{ - Command: []string{"/usr/sbin/sshd", "-D", "-e"}, - Image: config.PGBackRestContainerImage(postgresCluster), - LivenessProbe: &v1.Probe{ - Handler: v1.Handler{ - TCPSocket: &v1.TCPSocketAction{ - Port: intstr.FromInt(2022), - }, + serverVolume := corev1.Volume{ + Name: serverVolumeMount.Name, + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{Sources: certificates}, + }, + } + + container := corev1.Container{ + Name: naming.PGBackRestRepoContainerName, + Command: []string{"pgbackrest", "server"}, + Image: config.PGBackRestContainerImage(cluster), + ImagePullPolicy: cluster.Spec.ImagePullPolicy, + SecurityContext: initialize.RestrictedSecurityContext(), + + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"pgbackrest", "server-ping"}, }, }, - Name: naming.PGBackRestRepoContainerName, - VolumeMounts: []v1.VolumeMount{sshVolumeMount}, - SecurityContext: initialize.RestrictedSecurityContext(), - Resources: resources, - } + }, - // Mount PostgreSQL volumes if they are present in the template. 
- postgresMounts := map[string]corev1.VolumeMount{ - postgres.DataVolumeMount().Name: postgres.DataVolumeMount(), - postgres.WALVolumeMount().Name: postgres.WALVolumeMount(), - } - for i := range template.Spec.Volumes { - if mount, ok := postgresMounts[template.Spec.Volumes[i].Name]; ok { - container.VolumeMounts = append(container.VolumeMounts, mount) - } - } + VolumeMounts: []corev1.VolumeMount{serverVolumeMount}, + } - template.Spec.Containers = append(template.Spec.Containers, container) + if resources != nil { + container.Resources = *resources } - for _, name := range additionalVolumeMountContainers { - var containerFound bool - var index int - for index = range template.Spec.Containers { - if template.Spec.Containers[index].Name == name { - containerFound = true - break + // Mount PostgreSQL volumes that are present in pod. + postgresMounts := map[string]corev1.VolumeMount{ + postgres.DataVolumeMount().Name: postgres.DataVolumeMount(), + postgres.WALVolumeMount().Name: postgres.WALVolumeMount(), + } + if feature.Enabled(ctx, feature.TablespaceVolumes) { + for _, instance := range cluster.Spec.InstanceSets { + for _, vol := range instance.TablespaceVolumes { + tablespaceVolumeMount := postgres.TablespaceVolumeMount(vol.Name) + postgresMounts[tablespaceVolumeMount.Name] = tablespaceVolumeMount } } - if !containerFound { - return errors.Errorf("Unable to find container %q when adding pgBackRest to Pod", - name) + } + for i := range pod.Volumes { + if mount, ok := postgresMounts[pod.Volumes[i].Name]; ok { + container.VolumeMounts = append(container.VolumeMounts, mount) } - template.Spec.Containers[index].VolumeMounts = - append(template.Spec.Containers[index].VolumeMounts, sshVolumeMount) } - return nil + reloader := corev1.Container{ + Name: naming.ContainerPGBackRestConfig, + Command: reloadCommand(naming.ContainerPGBackRestConfig), + Image: container.Image, + ImagePullPolicy: container.ImagePullPolicy, + SecurityContext: initialize.RestrictedSecurityContext(), + + // The configuration mount is appended by [addConfigVolumeAndMounts]. + VolumeMounts: []corev1.VolumeMount{serverVolumeMount}, + } + + if sidecars := cluster.Spec.Backups.PGBackRest.Sidecars; sidecars != nil && + sidecars.PGBackRestConfig != nil && + sidecars.PGBackRestConfig.Resources != nil { + reloader.Resources = *sidecars.PGBackRestConfig.Resources + } + + pod.Containers = append(pod.Containers, container, reloader) + pod.Volumes = append(pod.Volumes, serverVolume) +} + +// AddServerToInstancePod adds the TLS server container and volume to pod for +// an instance of cluster. Any PostgreSQL volumes must already be in pod. +func AddServerToInstancePod( + ctx context.Context, + cluster *v1beta1.PostgresCluster, pod *corev1.PodSpec, + instanceCertificateSecretName string, +) { + certificates := []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: instanceCertificateSecretName, + }, + Items: instanceServerCertificates(), + }, + }} + + var resources *corev1.ResourceRequirements + if sidecars := cluster.Spec.Backups.PGBackRest.Sidecars; sidecars != nil && sidecars.PGBackRest != nil { + resources = sidecars.PGBackRest.Resources + } + + addServerContainerAndVolume(ctx, cluster, pod, certificates, resources) +} + +// AddServerToRepoPod adds the TLS server container and volume to pod for +// the dedicated repository host of cluster. 
+func AddServerToRepoPod( + ctx context.Context, + cluster *v1beta1.PostgresCluster, pod *corev1.PodSpec, +) { + certificates := []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: naming.PGBackRestSecret(cluster).Name, + }, + Items: repositoryServerCertificates(), + }, + }} + + var resources *corev1.ResourceRequirements + if cluster.Spec.Backups.PGBackRest.RepoHost != nil { + resources = &cluster.Spec.Backups.PGBackRest.RepoHost.Resources + } + + addServerContainerAndVolume(ctx, cluster, pod, certificates, resources) +} + +// InstanceCertificates populates the shared Secret with certificates needed to run pgBackRest. +func InstanceCertificates(ctx context.Context, + inCluster *v1beta1.PostgresCluster, + inRoot pki.Certificate, + inDNS pki.Certificate, inDNSKey pki.PrivateKey, + outInstanceCertificates *corev1.Secret, +) error { + var err error + + initialize.Map(&outInstanceCertificates.Data) + + if err == nil { + outInstanceCertificates.Data[certInstanceSecretKey], err = certFile(inDNS) + } + if err == nil { + outInstanceCertificates.Data[certInstancePrivateKeySecretKey], err = certFile(inDNSKey) + } + + return err } // ReplicaCreateCommand returns the command that can initialize the PostgreSQL @@ -253,10 +430,17 @@ func ReplicaCreateCommand( "--stanza=" + DefaultStanzaName, "--repo=" + strings.TrimPrefix(repoName, "repo"), "--link-map=pg_wal=" + postgres.WALDirectory(cluster, instance), + + // Do not create a recovery signal file on PostgreSQL v12 or later; + // Patroni creates a standby signal file which takes precedence. + // Patroni manages recovery.conf prior to PostgreSQL v12. + // - https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/restore/restore.c#L1824 + // - https://www.postgresql.org/docs/12/runtime-config-wal.html + "--type=standby", } } - if cluster.Spec.Standby != nil && cluster.Spec.Standby.Enabled { + if cluster.Spec.Standby != nil && cluster.Spec.Standby.Enabled && cluster.Spec.Standby.RepoName != "" { // Patroni initializes standby clusters using the same command it uses // for any replica. Assume the repository in the spec has a stanza // and can be used to restore. The repository name is validated by the @@ -277,3 +461,113 @@ func ReplicaCreateCommand( return nil } + +// RepoVolumeMount returns the name and mount path of the pgBackRest repo volume. +func RepoVolumeMount() corev1.VolumeMount { + return corev1.VolumeMount{Name: "pgbackrest-repo", MountPath: repoMountPath} +} + +// RestoreConfig populates targetConfigMap and targetSecret with values needed +// to restore a cluster from repositories defined in sourceConfigMap and sourceSecret. +func RestoreConfig( + sourceConfigMap, targetConfigMap *corev1.ConfigMap, + sourceSecret, targetSecret *corev1.Secret, +) { + initialize.Map(&targetConfigMap.Data) + + // Use the repository definitions from the source cluster. + // + // TODO(cbandy): This is the *entire* instance configuration from another + // cluster. For now, the stanza options are "pg1-path", "pg1-port", and + // "pg1-socket-path" and these are safe enough to use across different + // clusters running the same PostgreSQL version. When that list grows, + // consider changing this to use local stanza options and remote repository options. 
+ targetConfigMap.Data[CMInstanceKey] = sourceConfigMap.Data[CMInstanceKey] + + if sourceSecret != nil && targetSecret != nil { + initialize.Map(&targetSecret.Data) + + // - https://golang.org/issue/45038 + bytesClone := func(b []byte) []byte { return append([]byte(nil), b...) } + + // Use the CA and client certificate from the source cluster. + for _, item := range clientCertificates() { + targetSecret.Data[item.Key] = bytesClone(sourceSecret.Data[item.Key]) + } + } +} + +// Secret populates the pgBackRest Secret. +func Secret(ctx context.Context, + inCluster *v1beta1.PostgresCluster, + inRepoHost *appsv1.StatefulSet, + inRoot *pki.RootCertificateAuthority, + inSecret *corev1.Secret, + outSecret *corev1.Secret, +) error { + var err error + + // Save the CA and generate a TLS client certificate for the entire cluster. + if inRepoHost != nil { + initialize.Map(&outSecret.Data) + + // The server verifies its "tls-server-auth" option contains the common + // name (CN) of the certificate presented by a client. The entire + // cluster uses a single client certificate so the "tls-server-auth" + // option can stay the same when PostgreSQL instances and repository + // hosts are added or removed. + leaf := &pki.LeafCertificate{} + commonName := clientCommonName(inCluster) + dnsNames := []string{commonName} + + if err == nil { + // Unmarshal and validate the stored leaf. These first errors can + // be ignored because they result in an invalid leaf which is then + // correctly regenerated. + _ = leaf.Certificate.UnmarshalText(inSecret.Data[certClientSecretKey]) + _ = leaf.PrivateKey.UnmarshalText(inSecret.Data[certClientPrivateKeySecretKey]) + + leaf, err = inRoot.RegenerateLeafWhenNecessary(leaf, commonName, dnsNames) + err = errors.WithStack(err) + } + + if err == nil { + outSecret.Data[certAuthoritySecretKey], err = certFile(inRoot.Certificate) + } + if err == nil { + outSecret.Data[certClientPrivateKeySecretKey], err = certFile(leaf.PrivateKey) + } + if err == nil { + outSecret.Data[certClientSecretKey], err = certFile(leaf.Certificate) + } + } + + // Generate a TLS server certificate for each repository host. + if inRepoHost != nil { + // The client verifies the "pg-host" or "repo-host" option it used is + // present in the DNS names of the server certificate. + leaf := &pki.LeafCertificate{} + dnsNames := naming.RepoHostPodDNSNames(ctx, inRepoHost) + commonName := dnsNames[0] // FQDN + + if err == nil { + // Unmarshal and validate the stored leaf. These first errors can + // be ignored because they result in an invalid leaf which is then + // correctly regenerated. + _ = leaf.Certificate.UnmarshalText(inSecret.Data[certRepoSecretKey]) + _ = leaf.PrivateKey.UnmarshalText(inSecret.Data[certRepoPrivateKeySecretKey]) + + leaf, err = inRoot.RegenerateLeafWhenNecessary(leaf, commonName, dnsNames) + err = errors.WithStack(err) + } + + if err == nil { + outSecret.Data[certRepoPrivateKeySecretKey], err = certFile(leaf.PrivateKey) + } + if err == nil { + outSecret.Data[certRepoSecretKey], err = certFile(leaf.Certificate) + } + } + + return err +} diff --git a/internal/pgbackrest/reconcile_test.go b/internal/pgbackrest/reconcile_test.go index a98ae420b9..4957d58f7b 100644 --- a/internal/pgbackrest/reconcile_test.go +++ b/internal/pgbackrest/reconcile_test.go @@ -1,31 +1,27 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest import ( + "context" "fmt" + "reflect" "testing" - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" + "github.com/google/go-cmp/cmp/cmpopts" "gotest.tools/v3/assert" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/pki" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestAddRepoVolumesToPod(t *testing.T) { @@ -33,31 +29,41 @@ func TestAddRepoVolumesToPod(t *testing.T) { postgresCluster := &v1beta1.PostgresCluster{ObjectMeta: metav1.ObjectMeta{Name: "hippo"}} testsCases := []struct { - repos []v1beta1.PGBackRestRepo - containers []v1.Container - testMap map[string]string + repos []v1beta1.PGBackRestRepo + containers []corev1.Container + initContainers []corev1.Container + testMap map[string]string }{{ repos: []v1beta1.PGBackRestRepo{ {Name: "repo1", Volume: &v1beta1.RepoPVC{}}, {Name: "repo2", Volume: &v1beta1.RepoPVC{}}, }, - containers: []v1.Container{{Name: "database"}, {Name: "pgbackrest"}}, - testMap: map[string]string{}, + initContainers: []corev1.Container{{Name: "pgbackrest-log-dir"}}, + containers: []corev1.Container{{Name: "database"}, {Name: "pgbackrest"}}, + testMap: map[string]string{}, }, { repos: []v1beta1.PGBackRestRepo{ {Name: "repo1", Volume: &v1beta1.RepoPVC{}}, {Name: "repo2", Volume: &v1beta1.RepoPVC{}}, }, - containers: []v1.Container{{Name: "database"}}, - testMap: map[string]string{}, + initContainers: []corev1.Container{{Name: "pgbackrest-log-dir"}}, + containers: []corev1.Container{{Name: "database"}}, + testMap: map[string]string{}, + }, { + repos: []v1beta1.PGBackRestRepo{{Name: "repo1", Volume: &v1beta1.RepoPVC{}}}, + initContainers: []corev1.Container{{Name: "pgbackrest-log-dir"}}, + containers: []corev1.Container{{Name: "database"}, {Name: "pgbackrest"}}, + testMap: map[string]string{}, }, { - repos: []v1beta1.PGBackRestRepo{{Name: "repo1", Volume: &v1beta1.RepoPVC{}}}, - containers: []v1.Container{{Name: "database"}, {Name: "pgbackrest"}}, - testMap: map[string]string{}, + repos: []v1beta1.PGBackRestRepo{{Name: "repo1", Volume: &v1beta1.RepoPVC{}}}, + initContainers: []corev1.Container{{Name: "pgbackrest-log-dir"}}, + containers: []corev1.Container{{Name: "database"}}, + testMap: map[string]string{}, }, { - repos: []v1beta1.PGBackRestRepo{{Name: "repo1", Volume: &v1beta1.RepoPVC{}}}, - containers: []v1.Container{{Name: "database"}}, - testMap: map[string]string{}, + repos: []v1beta1.PGBackRestRepo{{Name: "repo1", Volume: &v1beta1.RepoPVC{}}}, + initContainers: []corev1.Container{}, + 
containers: []corev1.Container{{Name: "database"}}, + testMap: map[string]string{}, }, // rerun the same tests, but this time simulate an existing PVC { @@ -65,7 +71,8 @@ func TestAddRepoVolumesToPod(t *testing.T) { {Name: "repo1", Volume: &v1beta1.RepoPVC{}}, {Name: "repo2", Volume: &v1beta1.RepoPVC{}}, }, - containers: []v1.Container{{Name: "database"}, {Name: "pgbackrest"}}, + initContainers: []corev1.Container{{Name: "pgbackrest-log-dir"}}, + containers: []corev1.Container{{Name: "database"}, {Name: "pgbackrest"}}, testMap: map[string]string{ "repo1": "hippo-repo1", }, @@ -74,19 +81,29 @@ func TestAddRepoVolumesToPod(t *testing.T) { {Name: "repo1", Volume: &v1beta1.RepoPVC{}}, {Name: "repo2", Volume: &v1beta1.RepoPVC{}}, }, - containers: []v1.Container{{Name: "database"}}, + initContainers: []corev1.Container{{Name: "pgbackrest-log-dir"}}, + containers: []corev1.Container{{Name: "database"}}, + testMap: map[string]string{ + "repo1": "hippo-repo1", + }, + }, { + repos: []v1beta1.PGBackRestRepo{{Name: "repo1", Volume: &v1beta1.RepoPVC{}}}, + initContainers: []corev1.Container{{Name: "pgbackrest-log-dir"}}, + containers: []corev1.Container{{Name: "database"}, {Name: "pgbackrest"}}, testMap: map[string]string{ "repo1": "hippo-repo1", }, }, { - repos: []v1beta1.PGBackRestRepo{{Name: "repo1", Volume: &v1beta1.RepoPVC{}}}, - containers: []v1.Container{{Name: "database"}, {Name: "pgbackrest"}}, + repos: []v1beta1.PGBackRestRepo{{Name: "repo1", Volume: &v1beta1.RepoPVC{}}}, + initContainers: []corev1.Container{{Name: "pgbackrest-log-dir"}}, + containers: []corev1.Container{{Name: "database"}}, testMap: map[string]string{ "repo1": "hippo-repo1", }, }, { - repos: []v1beta1.PGBackRestRepo{{Name: "repo1", Volume: &v1beta1.RepoPVC{}}}, - containers: []v1.Container{{Name: "database"}}, + repos: []v1beta1.PGBackRestRepo{{Name: "repo1", Volume: &v1beta1.RepoPVC{}}}, + initContainers: []corev1.Container{}, + containers: []corev1.Container{{Name: "database"}}, testMap: map[string]string{ "repo1": "hippo-repo1", }, @@ -95,39 +112,56 @@ func TestAddRepoVolumesToPod(t *testing.T) { for _, tc := range testsCases { t.Run(fmt.Sprintf("repos=%d, containers=%d", len(tc.repos), len(tc.containers)), func(t *testing.T) { postgresCluster.Spec.Backups.PGBackRest.Repos = tc.repos - template := &v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: tc.containers, + template := &corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: tc.initContainers, + Containers: tc.containers, }, } err := AddRepoVolumesToPod(postgresCluster, template, tc.testMap, getContainerNames(tc.containers)...) 
- assert.NilError(t, err) - - // verify volumes and volume mounts - for _, r := range tc.repos { - var foundVolume bool - for _, v := range template.Spec.Volumes { - if v.Name == r.Name && v.VolumeSource.PersistentVolumeClaim.ClaimName == - naming.PGBackRestRepoVolume(postgresCluster, r.Name).Name { - foundVolume = true - break + if len(tc.initContainers) == 0 { + assert.Error(t, err, "Unable to find init container \"pgbackrest-log-dir\" when adding pgBackRest repo volumes") + } else { + assert.NilError(t, err) + + // verify volumes and volume mounts + for _, r := range tc.repos { + var foundVolume bool + for _, v := range template.Spec.Volumes { + if v.Name == r.Name && v.VolumeSource.PersistentVolumeClaim.ClaimName == + naming.PGBackRestRepoVolume(postgresCluster, r.Name).Name { + foundVolume = true + break + } } - } - if !foundVolume { - t.Error(fmt.Errorf("volume %s is missing or invalid", r.Name)) - } + if !foundVolume { + t.Errorf("volume %s is missing or invalid", r.Name) + } - for _, c := range template.Spec.Containers { - var foundVolumeMount bool - for _, vm := range c.VolumeMounts { - if vm.Name == r.Name && vm.MountPath == "/pgbackrest/"+r.Name { - foundVolumeMount = true - break + for _, c := range template.Spec.Containers { + var foundVolumeMount bool + for _, vm := range c.VolumeMounts { + if vm.Name == r.Name && vm.MountPath == "/pgbackrest/"+r.Name { + foundVolumeMount = true + break + } + } + if !foundVolumeMount { + t.Errorf("container volume mount %s is missing or invalid", r.Name) } } - if !foundVolumeMount { - t.Error(fmt.Errorf("volume mount %s is missing or invalid", r.Name)) + for _, c := range template.Spec.InitContainers { + var foundVolumeMount bool + for _, vm := range c.VolumeMounts { + if vm.Name == r.Name && vm.MountPath == "/pgbackrest/"+r.Name { + foundVolumeMount = true + break + } + } + if !foundVolumeMount { + t.Errorf("init container volume mount %s is missing or invalid", r.Name) + } } } } @@ -135,222 +169,796 @@ func TestAddRepoVolumesToPod(t *testing.T) { } } -func TestAddConfigsToPod(t *testing.T) { +func TestAddConfigToInstancePod(t *testing.T) { + cluster := v1beta1.PostgresCluster{} + cluster.Name = "hippo" + cluster.Default() - postgresCluster := &v1beta1.PostgresCluster{ObjectMeta: metav1.ObjectMeta{Name: "hippo"}} + pod := corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "database"}, + {Name: "other"}, + {Name: "pgbackrest"}, + }, + } - testCases := []struct { - configs []v1.VolumeProjection - containers []v1.Container - }{{ - configs: []v1.VolumeProjection{ - {ConfigMap: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "cust-config.conf"}}}, - {Secret: &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "cust-secret.conf"}}}}, - containers: []v1.Container{{Name: "database"}, {Name: "pgbackrest"}}, - }, { - configs: []v1.VolumeProjection{ - {ConfigMap: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "cust-config.conf"}}}, - {Secret: &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "cust-secret.conf"}}}}, - containers: []v1.Container{{Name: "pgbackrest"}}, - }, { - configs: []v1.VolumeProjection{}, - containers: []v1.Container{{Name: "database"}, {Name: "pgbackrest"}}, - }, { - configs: []v1.VolumeProjection{}, - containers: []v1.Container{{Name: "pgbackrest"}}, - }} - - for _, tc := range testCases { - t.Run(fmt.Sprintf("configs=%d, containers=%d", len(tc.configs), len(tc.containers)), func(t *testing.T) { - 
postgresCluster.Spec.Backups.PGBackRest.Configuration = tc.configs - template := &v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: tc.containers, - }, - } + alwaysExpect := func(t testing.TB, result *corev1.PodSpec) { + // Only Containers and Volumes fields have changed. + assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) - err := AddConfigsToPod(postgresCluster, template, CMInstanceKey, - getContainerNames(tc.containers)...) - assert.NilError(t, err) - - // check that the backrest config volume exists - var configVol *v1.Volume - var foundConfigVol bool - for i, v := range template.Spec.Volumes { - if v.Name == ConfigVol { - foundConfigVol = true - configVol = &template.Spec.Volumes[i] - break - } - } - if !foundConfigVol { - t.Error(fmt.Errorf("volume %s is missing", ConfigVol)) - } + // Only database and pgBackRest containers have mounts. + assert.Assert(t, cmp.MarshalMatches(result.Containers, ` +- name: database + resources: {} + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true +- name: other + resources: {} +- name: pgbackrest + resources: {} + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + `)) + } - // check that the backrest config volume contains default configs - var foundDefaultConfigMapVol bool - cmName := naming.PGBackRestConfig(postgresCluster).Name - for _, s := range configVol.Projected.Sources { - if s.ConfigMap != nil && s.ConfigMap.Name == cmName { - foundDefaultConfigMapVol = true - break - } - } - if !foundDefaultConfigMapVol { - t.Error(fmt.Errorf("ConfigMap %s is missing", cmName)) - } + t.Run("CustomProjections", func(t *testing.T) { + custom := corev1.ConfigMapProjection{} + custom.Name = "custom-configmap" - // verify custom configs are present in the backrest config volume - for _, c := range tc.configs { - var foundCustomConfig bool - for _, s := range configVol.Projected.Sources { - if equality.Semantic.DeepEqual(c, s) { - foundCustomConfig = true - break - } - } - assert.Assert(t, foundCustomConfig) - } + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Configuration = []corev1.VolumeProjection{ + {ConfigMap: &custom}, + } - // verify the containers specified have the proper volume mounts - for _, c := range template.Spec.Containers { - var foundVolumeMount bool - for _, vm := range c.VolumeMounts { - if vm.Name == ConfigVol && vm.MountPath == ConfigDir { - foundVolumeMount = true - break - } - } - assert.Assert(t, foundVolumeMount) - } - }) - } -} + out := pod.DeepCopy() + AddConfigToInstancePod(cluster, out) + alwaysExpect(t, out) -func TestAddSSHToPod(t *testing.T) { + // Instance configuration files after custom projections. 
+ assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` +- name: pgbackrest-config + projected: + sources: + - configMap: + name: custom-configmap + - configMap: + items: + - key: pgbackrest_instance.conf + path: pgbackrest_instance.conf + - key: config-hash + path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf + name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest + `)) + }) - postgresClusterBase := &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "hippo", - }, - Spec: v1beta1.PostgresClusterSpec{ - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{}, + t.Run("NoVolumeRepo", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = nil + + out := pod.DeepCopy() + AddConfigToInstancePod(cluster, out) + alwaysExpect(t, out) + + // Instance configuration and certificates. + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_instance.conf + path: pgbackrest_instance.conf + - key: config-hash + path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf + name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest + `)) + }) + + t.Run("OneVolumeRepo", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: new(v1beta1.RepoPVC), }, + } + + out := pod.DeepCopy() + AddConfigToInstancePod(cluster, out) + alwaysExpect(t, out) + + // Instance configuration files, server config, and optional client certificates. + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_instance.conf + path: pgbackrest_instance.conf + - key: config-hash + path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf + name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest + `)) + }) +} + +func TestAddConfigToRepoPod(t *testing.T) { + cluster := v1beta1.PostgresCluster{} + cluster.Name = "hippo" + cluster.Default() + + pod := corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "other"}, + {Name: "pgbackrest"}, }, } - resources := v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("250m"), - v1.ResourceMemory: resource.MustParse("128Mi"), + alwaysExpect := func(t testing.TB, result *corev1.PodSpec) { + // Only Containers and Volumes fields have changed. + assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) + + // Only pgBackRest containers have mounts. 
+ assert.Assert(t, cmp.MarshalMatches(result.Containers, ` +- name: other + resources: {} +- name: pgbackrest + resources: {} + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + `)) + } + + t.Run("CustomProjections", func(t *testing.T) { + custom := corev1.ConfigMapProjection{} + custom.Name = "custom-configmap" + + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Configuration = []corev1.VolumeProjection{ + {ConfigMap: &custom}, + } + + out := pod.DeepCopy() + AddConfigToRepoPod(cluster, out) + alwaysExpect(t, out) + + // Repository configuration files, server config, and client certificates + // after custom projections. + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` +- name: pgbackrest-config + projected: + sources: + - configMap: + name: custom-configmap + - configMap: + items: + - key: pgbackrest_repo.conf + path: pgbackrest_repo.conf + - key: config-hash + path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf + name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest + `)) + }) +} + +func TestAddConfigToRestorePod(t *testing.T) { + cluster := v1beta1.PostgresCluster{} + cluster.Name = "source" + cluster.Default() + + pod := corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "other"}, + {Name: "pgbackrest"}, }, } - testCases := []struct { - sshConfig *v1.ConfigMapProjection - sshSecret *v1.SecretProjection - additionalSSHContainers []v1.Container - }{{ - sshConfig: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "cust-ssh-config.conf"}}, - sshSecret: &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "cust-ssh-secret.conf"}}, - additionalSSHContainers: []v1.Container{{Name: "database"}}, - }, { - additionalSSHContainers: []v1.Container{{Name: "database"}}, - }} + alwaysExpect := func(t testing.TB, result *corev1.PodSpec) { + // Only Containers and Volumes fields have changed. + assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) + + // Only pgBackRest containers have mounts. 
+ assert.Assert(t, cmp.MarshalMatches(result.Containers, ` +- name: other + resources: {} +- name: pgbackrest + resources: {} + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + `)) + } + + t.Run("CustomProjections", func(t *testing.T) { + custom := corev1.ConfigMapProjection{} + custom.Name = "custom-configmap" - for _, tc := range testCases { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Configuration = []corev1.VolumeProjection{ + {ConfigMap: &custom}, + } - customConfig := (tc.sshConfig != nil) - customSecret := (tc.sshSecret != nil) - testRunStr := fmt.Sprintf("customConfig=%t, customSecret=%t, additionalSSHContainers=%d", - customConfig, customSecret, len(tc.additionalSSHContainers)) + custom2 := corev1.SecretProjection{} + custom2.Name = "source-custom-secret" - postgresCluster := postgresClusterBase.DeepCopy() + sourceCluster := cluster.DeepCopy() + sourceCluster.Spec.Backups.PGBackRest.Configuration = []corev1.VolumeProjection{ + {Secret: &custom2}, + } - if customConfig || customSecret { - if postgresCluster.Spec.Backups.PGBackRest.RepoHost == nil { - postgresCluster.Spec.Backups.PGBackRest.RepoHost = &v1beta1.PGBackRestRepoHost{} - } - postgresCluster.Spec.Backups.PGBackRest.RepoHost.SSHConfiguration = tc.sshConfig - postgresCluster.Spec.Backups.PGBackRest.RepoHost.SSHSecret = tc.sshSecret + out := pod.DeepCopy() + AddConfigToRestorePod(cluster, sourceCluster, out) + alwaysExpect(t, out) + + // Instance configuration files and optional client certificates + // after custom projections. + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` +- name: pgbackrest-config + projected: + sources: + - configMap: + name: custom-configmap + - secret: + name: source-custom-secret + - configMap: + items: + - key: pgbackrest_instance.conf + path: pgbackrest_instance.conf + name: source-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: source-pgbackrest + optional: true + `)) + }) + + t.Run("CloudBasedDataSourceProjections", func(t *testing.T) { + custom := corev1.SecretProjection{} + custom.Name = "custom-secret" + + cluster := cluster.DeepCopy() + cluster.Spec.DataSource = &v1beta1.DataSource{ + PGBackRest: &v1beta1.PGBackRestDataSource{ + Configuration: []corev1.VolumeProjection{{Secret: &custom}}, + }, + } + + out := pod.DeepCopy() + AddConfigToRestorePod(cluster, nil, out) + alwaysExpect(t, out) + + // Instance configuration files and optional client certificates + // after custom projections. 
+ assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` +- name: pgbackrest-config + projected: + sources: + - secret: + name: custom-secret + - configMap: + items: + - key: pgbackrest_instance.conf + path: pgbackrest_instance.conf + name: source-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: source-pgbackrest + optional: true + `)) + }) + + t.Run("CustomFiles", func(t *testing.T) { + custom := corev1.ConfigMapProjection{} + custom.Name = "custom-configmap-files" + + cluster := cluster.DeepCopy() + cluster.Spec.Config.Files = []corev1.VolumeProjection{ + {ConfigMap: &custom}, } - t.Run(testRunStr, func(t *testing.T) { + sourceCluster := cluster.DeepCopy() + + out := pod.DeepCopy() + AddConfigToRestorePod(cluster, sourceCluster, out) + alwaysExpect(t, out) + + // Instance configuration files and optional configuration files + // after custom projections. + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` +- name: postgres-config + projected: + sources: + - configMap: + name: custom-configmap-files +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_instance.conf + path: pgbackrest_instance.conf + name: source-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: source-pgbackrest + optional: true + `)) + }) +} + +func TestAddServerToInstancePod(t *testing.T) { + t.Parallel() + + ctx := context.Background() + cluster := v1beta1.PostgresCluster{} + cluster.Name = "hippo" + cluster.Default() + + pod := corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "database"}, + {Name: "other"}, + }, + Volumes: []corev1.Volume{ + {Name: "other"}, + {Name: "postgres-data"}, + {Name: "postgres-wal"}, + }, + } - template := &v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: tc.additionalSSHContainers, + t.Run("CustomResources", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Sidecars = &v1beta1.PGBackRestSidecars{ + PGBackRest: &v1beta1.Sidecar{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("5m"), + }, }, - } + }, + PGBackRestConfig: &v1beta1.Sidecar{ + Resources: &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("17m"), + }, + }, + }, + } - err := AddSSHToPod(postgresCluster, template, true, resources, - getContainerNames(tc.additionalSSHContainers)...) 
- assert.NilError(t, err) - - // verify the ssh volume - var foundSSHVolume bool - var sshVolume v1.Volume - for _, v := range template.Spec.Volumes { - if v.Name == naming.PGBackRestSSHVolume { - foundSSHVolume = true - sshVolume = v - break - } - } - assert.Assert(t, foundSSHVolume) - - // verify the ssh config and secret - var foundSSHConfigVolume, foundSSHSecretVolume bool - defaultConfigName := naming.PGBackRestSSHConfig(postgresCluster).Name - defaultSecretName := naming.PGBackRestSSHSecret(postgresCluster).Name - for _, s := range sshVolume.Projected.Sources { - if s.ConfigMap != nil { - if (!customConfig && s.ConfigMap.Name == defaultConfigName) || - (customConfig && s.ConfigMap.Name == tc.sshConfig.Name) { - foundSSHConfigVolume = true - } - } else if s.Secret != nil { - if (!customSecret && s.Secret.Name == defaultSecretName) || - (customSecret && s.Secret.Name == tc.sshSecret.Name) { - foundSSHSecretVolume = true - } - } - } - assert.Assert(t, foundSSHConfigVolume) - assert.Assert(t, foundSSHSecretVolume) - - // verify that pgbackrest container is present and that the proper SSH volume mount in - // present in all containers - var foundSSHContainer bool - for _, c := range template.Spec.Containers { - if c.Name == naming.PGBackRestRepoContainerName { - foundSSHContainer = true - // verify proper resources are present and correct - assert.DeepEqual(t, c.Resources, resources) - } - var foundVolumeMount bool - for _, vm := range c.VolumeMounts { - if vm.Name == naming.PGBackRestSSHVolume && vm.MountPath == sshConfigPath && - vm.ReadOnly == true { - foundVolumeMount = true - break - } - } - assert.Assert(t, foundVolumeMount) - } - assert.Assert(t, foundSSHContainer) - }) + out := pod.DeepCopy() + AddServerToInstancePod(ctx, cluster, out, "instance-secret-name") + + // Only Containers and Volumes fields have changed. + assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) + + // The TLS server is added while other containers are untouched. + // It has PostgreSQL volumes mounted while other volumes are ignored. 
+ assert.Assert(t, cmp.MarshalMatches(out.Containers, ` +- name: database + resources: {} +- name: other + resources: {} +- command: + - pgbackrest + - server + livenessProbe: + exec: + command: + - pgbackrest + - server-ping + name: pgbackrest + resources: + requests: + cpu: 5m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + - mountPath: /pgdata + name: postgres-data + - mountPath: /pgwal + name: postgres-wal +- command: + - bash + - -ceu + - -- + - |- + monitor() { + exec {fd}<> <(:||:) + until read -r -t 5 -u "${fd}"; do + if + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --dereference --format='Loaded configuration dated %y' "${filename}" + elif + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] + } && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded certificates dated %y' "${directory}" + fi + done + }; export directory="$1" authority="$2" filename="$3"; export -f monitor; exec -a "$0" bash -ceu monitor + - pgbackrest-config + - /etc/pgbackrest/server + - /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt + - /etc/pgbackrest/conf.d/~postgres-operator_server.conf + name: pgbackrest-config + resources: + limits: + cpu: 17m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + `)) + + // The server certificate comes from the instance Secret. + // Other volumes are untouched. + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` +- name: other +- name: postgres-data +- name: postgres-wal +- name: pgbackrest-server + projected: + sources: + - secret: + items: + - key: pgbackrest-server.crt + path: server-tls.crt + - key: pgbackrest-server.key + mode: 384 + path: server-tls.key + name: instance-secret-name + `)) + }) + + t.Run("AddTablespaces", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx := feature.NewContext(ctx, gate) + + clusterWithTablespaces := cluster.DeepCopy() + clusterWithTablespaces.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{ + { + TablespaceVolumes: []v1beta1.TablespaceVolume{ + {Name: "trial"}, + {Name: "castle"}, + }, + }, + } + + out := pod.DeepCopy() + out.Volumes = append(out.Volumes, corev1.Volume{Name: "tablespace-trial"}, corev1.Volume{Name: "tablespace-castle"}) + AddServerToInstancePod(ctx, clusterWithTablespaces, out, "instance-secret-name") + + // Only Containers and Volumes fields have changed. 
+ assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) + assert.Assert(t, cmp.MarshalMatches(out.Containers, ` +- name: database + resources: {} +- name: other + resources: {} +- command: + - pgbackrest + - server + livenessProbe: + exec: + command: + - pgbackrest + - server-ping + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + - mountPath: /pgdata + name: postgres-data + - mountPath: /pgwal + name: postgres-wal + - mountPath: /tablespaces/trial + name: tablespace-trial + - mountPath: /tablespaces/castle + name: tablespace-castle +- command: + - bash + - -ceu + - -- + - |- + monitor() { + exec {fd}<> <(:||:) + until read -r -t 5 -u "${fd}"; do + if + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --dereference --format='Loaded configuration dated %y' "${filename}" + elif + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] + } && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded certificates dated %y' "${directory}" + fi + done + }; export directory="$1" authority="$2" filename="$3"; export -f monitor; exec -a "$0" bash -ceu monitor + - pgbackrest-config + - /etc/pgbackrest/server + - /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt + - /etc/pgbackrest/conf.d/~postgres-operator_server.conf + name: pgbackrest-config + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + `)) + }) +} + +func TestAddServerToRepoPod(t *testing.T) { + t.Parallel() + + ctx := context.Background() + cluster := v1beta1.PostgresCluster{} + cluster.Name = "hippo" + cluster.Default() + + pod := corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "other"}, + }, } + + t.Run("CustomResources", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.RepoHost = &v1beta1.PGBackRestRepoHost{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("5m"), + }, + }, + } + cluster.Spec.Backups.PGBackRest.Sidecars = &v1beta1.PGBackRestSidecars{ + PGBackRestConfig: &v1beta1.Sidecar{ + Resources: &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("19m"), + }, + }, + }, + } + + out := pod.DeepCopy() + AddServerToRepoPod(ctx, cluster, out) + + // Only Containers and Volumes fields have changed. + assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) + + // The TLS server is added while other containers are untouched. 
+ assert.Assert(t, cmp.MarshalMatches(out.Containers, ` +- name: other + resources: {} +- command: + - pgbackrest + - server + livenessProbe: + exec: + command: + - pgbackrest + - server-ping + name: pgbackrest + resources: + requests: + cpu: 5m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true +- command: + - bash + - -ceu + - -- + - |- + monitor() { + exec {fd}<> <(:||:) + until read -r -t 5 -u "${fd}"; do + if + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --dereference --format='Loaded configuration dated %y' "${filename}" + elif + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] + } && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded certificates dated %y' "${directory}" + fi + done + }; export directory="$1" authority="$2" filename="$3"; export -f monitor; exec -a "$0" bash -ceu monitor + - pgbackrest-config + - /etc/pgbackrest/server + - /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt + - /etc/pgbackrest/conf.d/~postgres-operator_server.conf + name: pgbackrest-config + resources: + limits: + cpu: 19m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + `)) + + // The server certificate comes from the pgBackRest Secret. + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` +- name: pgbackrest-server + projected: + sources: + - secret: + items: + - key: pgbackrest-repo-host.crt + path: server-tls.crt + - key: pgbackrest-repo-host.key + mode: 384 + path: server-tls.key + name: hippo-pgbackrest + `)) + }) } -func getContainerNames(containers []v1.Container) []string { +func getContainerNames(containers []corev1.Container) []string { names := make([]string, len(containers)) for i, c := range containers { names[i] = c.Name @@ -387,7 +995,7 @@ func TestReplicaCreateCommand(t *testing.T) { assert.DeepEqual(t, ReplicaCreateCommand(cluster, instance), []string{ "pgbackrest", "restore", "--delta", "--stanza=db", "--repo=2", - "--link-map=pg_wal=/pgdata/pg0_wal", + "--link-map=pg_wal=/pgdata/pg0_wal", "--type=standby", }) }) @@ -400,7 +1008,68 @@ func TestReplicaCreateCommand(t *testing.T) { assert.DeepEqual(t, ReplicaCreateCommand(cluster, instance), []string{ "pgbackrest", "restore", "--delta", "--stanza=db", "--repo=7", - "--link-map=pg_wal=/pgdata/pg0_wal", + "--link-map=pg_wal=/pgdata/pg0_wal", "--type=standby", }) }) } + +func TestSecret(t *testing.T) { + t.Parallel() + + ctx := context.Background() + cluster := new(v1beta1.PostgresCluster) + existing := new(corev1.Secret) + intent := new(corev1.Secret) + + root, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) + + t.Run("NoRepoHost", func(t *testing.T) { + // Nothing happens when there is no repository host. 
+ constant := intent.DeepCopy() + assert.NilError(t, Secret(ctx, cluster, nil, root, existing, intent)) + assert.DeepEqual(t, constant, intent) + }) + + host := new(appsv1.StatefulSet) + host.Namespace = "ns1" + host.Name = "some-repo" + host.Spec.ServiceName = "some-domain" + + // The existing Secret does not change. + constant := existing.DeepCopy() + assert.NilError(t, Secret(ctx, cluster, host, root, existing, intent)) + assert.DeepEqual(t, constant, existing) + + // There is a leaf certificate and private key for the repository host. + leaf := &pki.LeafCertificate{} + assert.NilError(t, leaf.Certificate.UnmarshalText(intent.Data["pgbackrest-repo-host.crt"])) + assert.NilError(t, leaf.PrivateKey.UnmarshalText(intent.Data["pgbackrest-repo-host.key"])) + + assert.DeepEqual(t, leaf.Certificate.DNSNames(), []string{ + leaf.Certificate.CommonName(), + "some-repo-0.some-domain.ns1.svc", + "some-repo-0.some-domain.ns1", + "some-repo-0.some-domain", + }) + + // Assuming the intent is written, no change when called again. + existing.Data = intent.Data + before := intent.DeepCopy() + assert.NilError(t, Secret(ctx, cluster, host, root, existing, intent)) + assert.DeepEqual(t, before, intent) + + t.Run("Rotation", func(t *testing.T) { + // The leaf certificate is regenerated when the root authority changes. + root2, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) + assert.NilError(t, Secret(ctx, cluster, host, root2, existing, intent)) + + leaf2 := &pki.LeafCertificate{} + assert.NilError(t, leaf2.Certificate.UnmarshalText(intent.Data["pgbackrest-repo-host.crt"])) + assert.NilError(t, leaf2.PrivateKey.UnmarshalText(intent.Data["pgbackrest-repo-host.key"])) + + assert.Assert(t, !reflect.DeepEqual(leaf.Certificate, leaf2.Certificate)) + assert.Assert(t, !reflect.DeepEqual(leaf.PrivateKey, leaf2.PrivateKey)) + }) +} diff --git a/internal/pgbackrest/restore.md b/internal/pgbackrest/restore.md new file mode 100644 index 0000000000..8828576921 --- /dev/null +++ b/internal/pgbackrest/restore.md @@ -0,0 +1,111 @@ + + +## Target Action + +The `--target-action` option of `pgbackrest restore` almost translates to the +PostgreSQL `recovery_target_action` parameter but not exactly. The behavior of +that parameter also depends on the PostgreSQL version and on other parameters. + +For PostgreSQL 9.5 through 15, + + - The PostgreSQL documentation states that for `recovery_target_action` + "the default is `pause`," but that is only the case when `hot_standby=on`. + + - The PostgreSQL documentation states that when `hot_standby=off` "a setting + of `pause` will act the same as `shutdown`," but that cannot be configured + through pgBackRest. + +The default value of `hot_standby` is `off` prior to PostgreSQL 10 and `on` since. 
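+
+As a rough illustration (a hedged sketch only, not how the operator invokes pgBackRest;
+the stanza name, timestamp, and target values are placeholders), a point-in-time restore
+that should end recovery with a promotion might be invoked as:
+
+```
+pgbackrest restore --stanza=db \
+  --type=time --target='2024-01-01 00:00:00+00' \
+  --target-action=promote
+```
+
+For PostgreSQL 12 and later, pgBackRest writes the resulting recovery settings, including
+`recovery_target_action = 'promote'`, into `postgresql.auto.conf`; older versions receive a
+`recovery.conf` instead. The tables below map each `--target-action` value to the PostgreSQL
+parameter and show how `hot_standby` changes the outcome.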
+
+### PostgreSQL 15, 14, 13, 12
+
+[12]: https://www.postgresql.org/docs/12/runtime-config-wal.html
+[commit]: https://git.postgresql.org/gitweb/?p=postgresql.git;h=2dedf4d9a899b36d1a8ed29be5efbd1b31a8fe85
+
+| --target-action | recovery_target_action | hot_standby=off | hot_standby=on (default) |
+|------------------|------------------------|-----------------|--------------------------|
+| _not configured_ | _not configured_ | shutdown | pause |
+| `pause` | _not configured_ | shutdown | pause |
+| _not possible_ | `pause` | shutdown | pause |
+| `promote` | `promote` | promote | promote |
+| `shutdown` | `shutdown` | shutdown | shutdown |
+
+
+### PostgreSQL 11, 10
+
+[11]: https://www.postgresql.org/docs/11/recovery-target-settings.html
+[10]: https://www.postgresql.org/docs/10/runtime-config-replication.html
+
+| --target-action | recovery_target_action | hot_standby=off | hot_standby=on (default) |
+|------------------|------------------------|-----------------|--------------------------|
+| _not configured_ | _not configured_ | promote | pause |
+| `pause` | _not configured_ | promote | pause |
+| _not possible_ | `pause` | shutdown | pause |
+| `promote` | `promote` | promote | promote |
+| `shutdown` | `shutdown` | shutdown | shutdown |
+
+
+### PostgreSQL 9.6, 9.5
+
+[9.6]: https://www.postgresql.org/docs/9.6/recovery-target-settings.html
+
+| --target-action | recovery_target_action | hot_standby=off (default) | hot_standby=on |
+|------------------|------------------------|---------------------------|----------------|
+| _not configured_ | _not configured_ | promote | pause |
+| `pause` | _not configured_ | promote | pause |
+| _not possible_ | `pause` | shutdown | pause |
+| `promote` | `promote` | promote | promote |
+| `shutdown` | `shutdown` | shutdown | shutdown |
+
+
+### PostgreSQL 9.4, 9.3, 9.2, 9.1
+
+[9.4]: https://www.postgresql.org/docs/9.4/recovery-target-settings.html
+[9.4-replication]: https://www.postgresql.org/docs/9.4/runtime-config-replication.html
+
+| --target-action | pause_at_recovery_target | hot_standby=off (default) | hot_standby=on |
+|------------------|--------------------------|---------------------------|----------------|
+| _not configured_ | _not configured_ | promote | pause |
+| `pause` | _not configured_ | promote | pause |
+| _not possible_ | `true` | promote | pause |
+| `promote` | `false` | promote | promote |
+
+
+
diff --git a/internal/pgbackrest/ssh_config.go b/internal/pgbackrest/ssh_config.go
deleted file mode 100644
index a8aa3c990d..0000000000
--- a/internal/pgbackrest/ssh_config.go
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- Copyright 2021 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/ - -package pgbackrest - -import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "fmt" - - "github.com/crunchydata/postgres-operator/internal/initialize" - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/pki" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" - "golang.org/x/crypto/ssh" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - - // knownHostsKey is the name of the 'known_hosts' file - knownHostsKey = "ssh_known_hosts" - - // mount path for SSH configuration - sshConfigPath = "/etc/ssh" - - // config file for the SSH client - sshConfig = "ssh_config" - // config file for the SSHD service - sshdConfig = "sshd_config" - - // private key file name - privateKey = "id_ecdsa" - // public key file name - publicKey = "id_ecdsa.pub" - // SSH configuration volume - sshConfigVol = "sshd" -) - -// sshKey stores byte slices that represent private and public ssh keys -// used to populate the postgrescluster's SSH secret -type sshKey struct { - Private []byte - Public []byte -} - -// CreateSSHConfigMapIntent creates a configmap struct with SSHD service and SSH client -// configuration settings in the data field. -func CreateSSHConfigMapIntent(postgresCluster *v1beta1.PostgresCluster) v1.ConfigMap { - - meta := naming.PGBackRestSSHConfig(postgresCluster) - meta.Annotations = naming.Merge( - postgresCluster.Spec.Metadata.GetAnnotationsOrNil(), - postgresCluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil()) - meta.Labels = naming.Merge(postgresCluster.Spec.Metadata.GetLabelsOrNil(), - postgresCluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), - naming.PGBackRestDedicatedLabels(postgresCluster.GetName()), - ) - - cm := v1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - Kind: "ConfigMap", - APIVersion: "v1", - }, - ObjectMeta: meta, - } - - // create an empty map for the config data - initialize.StringMap(&cm.Data) - - // if the SSH config data map is not ok, populate with the configuration string - if _, ok := cm.Data[sshConfig]; !ok { - cm.Data[sshConfig] = getSSHConfigString() - } - - // if the SSHD config data map is not ok, populate with the configuration string - if _, ok := cm.Data[sshdConfig]; !ok { - cm.Data[sshdConfig] = getSSHDConfigString() - } - - return cm -} - -// CreateSSHSecretIntent creates the secret containing the new public private key pair to use -// when connecting to and from the pgBackRest repo pod. 
-func CreateSSHSecretIntent(postgresCluster *v1beta1.PostgresCluster, - currentSSHSecret *v1.Secret, serviceName, serviceNamespace string) (v1.Secret, error) { - - meta := naming.PGBackRestSSHSecret(postgresCluster) - meta.Annotations = naming.Merge( - postgresCluster.Spec.Metadata.GetAnnotationsOrNil(), - postgresCluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil()) - meta.Labels = naming.Merge(postgresCluster.Spec.Metadata.GetLabelsOrNil(), - postgresCluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), - naming.PGBackRestDedicatedLabels(postgresCluster.GetName()), - ) - - secret := v1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: meta, - Type: "Opaque", - } - - var privKeyExists, pubKeyExists bool - if currentSSHSecret != nil { - _, privKeyExists = currentSSHSecret.Data[privateKey] - _, pubKeyExists = currentSSHSecret.Data[publicKey] - } - var keys sshKey - var err error - if pubKeyExists && privKeyExists { - keys = sshKey{ - Private: currentSSHSecret.Data[privateKey], - Public: currentSSHSecret.Data[publicKey], - } - } else { - // get the key byte slices - keys, err = getKeys() - if err != nil { - return secret, err - } - } - - // create an empty map for the key data - initialize.ByteMap(&secret.Data) - // if the public key data map is not ok, populate with the public key - if _, ok := secret.Data[publicKey]; !ok { - secret.Data[publicKey] = keys.Public - } - - // if the private key data map is not ok, populate with the private key - if _, ok := secret.Data[privateKey]; !ok { - secret.Data[privateKey] = keys.Private - } - - // if the known_hosts is not ok, populate with the knownHosts key - if _, ok := secret.Data[knownHostsKey]; !ok { - secret.Data[knownHostsKey] = []byte(fmt.Sprintf( - "*.%s.%s.svc.%s %s", serviceName, - serviceNamespace, naming.KubernetesClusterDomain(context.Background()), - string(keys.Public))) - } - - return secret, nil -} - -// SSHConfigVolumeAndMount creates a volume and mount configuration from the SSHD configuration configmap -// and secret that will be used by the postgrescluster when connecting to the pgBackRest repo pod -func SSHConfigVolumeAndMount(sshConfigMap *v1.ConfigMap, sshSecret *v1.Secret, pod *v1.PodSpec, containerName string) { - // Note: the 'container' string will be 'database' for the PostgreSQL database container, - // otherwise it will be 'backrest' - var ( - sshConfigVP []v1.VolumeProjection - ) - - volume := v1.Volume{Name: sshConfigVol} - volume.Projected = &v1.ProjectedVolumeSource{} - - // Add our projections after those specified in the CR. Items later in the - // list take precedence over earlier items (that is, last write wins). 
- // - https://docs.openshift.com/container-platform/latest/nodes/containers/nodes-containers-projected-volumes.html - // - https://kubernetes.io/docs/concepts/storage/volumes/#projected - volume.Projected.Sources = append( - sshConfigVP, - v1.VolumeProjection{ - ConfigMap: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{ - Name: sshConfigMap.Name, - }, - Items: []v1.KeyToPath{{ - Key: sshConfig, - Path: "./" + sshConfig, - }, { - Key: sshdConfig, - Path: "./" + sshdConfig, - }}, - }, - Secret: &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{ - Name: sshConfigMap.Name, - }, - Items: []v1.KeyToPath{{ - Key: privateKey, - Path: "./" + privateKey, - }, { - Key: publicKey, - Path: "./" + publicKey, - }}, - }, - }, - ) - - mount := v1.VolumeMount{ - Name: volume.Name, - MountPath: sshConfigPath, - ReadOnly: true, - } - - pod.Volumes = mergeVolumes(pod.Volumes, volume) - - container := findOrAppendContainer(&pod.Containers, containerName) - - container.VolumeMounts = mergeVolumeMounts(container.VolumeMounts, mount) -} - -// getSSHDConfigString returns a string consisting of the basic required configuration -// for the SSHD service -func getSSHDConfigString() string { - - // please note that the ForceCommand setting ensures nss_wrapper env vars are set when - // executing commands as required for OpenShift compatibility: - // https://access.redhat.com/articles/4859371 - configString := `AuthorizedKeysFile /etc/ssh/id_ecdsa.pub -ForceCommand NSS_WRAPPER_SUBDIR=postgres . /opt/crunchy/bin/nss_wrapper_env.sh && $SSH_ORIGINAL_COMMAND -HostKey /etc/ssh/id_ecdsa -PasswordAuthentication no -PermitRootLogin no -PidFile /tmp/sshd.pid -Port 2022 -PubkeyAuthentication yes -StrictModes no -` - return configString -} - -// getSSHDConfigString returns a string consisting of the basic required configuration -// for the SSH client -func getSSHConfigString() string { - - configString := `Host * -StrictHostKeyChecking yes -IdentityFile /etc/ssh/id_ecdsa -Port 2022 -User postgres -` - return configString -} - -// getKeys returns public/private byte slices of a ECDSA keypair using a P-521 curve -// formatted to be readable by OpenSSH -func getKeys() (sshKey, error) { - var keys sshKey - - ecdsaPriv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return sshKey{}, err - } - - pkiPriv := pki.NewPrivateKey(ecdsaPriv) - - keys.Private, err = pkiPriv.MarshalText() - if err != nil { - return sshKey{}, err - } - keys.Public, err = getECDSAPublicKey(&pkiPriv.PrivateKey.PublicKey) - if err != nil { - return sshKey{}, err - } - - return keys, nil - -} - -// getECDSAPublicKey returns the ECDSA public key -// serialized for inclusion in an OpenSSH authorized_keys file -func getECDSAPublicKey(key *ecdsa.PublicKey) ([]byte, error) { - pubKey, err := ssh.NewPublicKey(key) - if err != nil { - return nil, err - } - - return ssh.MarshalAuthorizedKey(pubKey), nil -} diff --git a/internal/pgbackrest/ssh_config_test.go b/internal/pgbackrest/ssh_config_test.go deleted file mode 100644 index 4d726d7cd4..0000000000 --- a/internal/pgbackrest/ssh_config_test.go +++ /dev/null @@ -1,242 +0,0 @@ -// +build envtest - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pgbackrest - -import ( - "context" - "crypto/x509" - "encoding/pem" - "fmt" - - "reflect" - "strings" - "testing" - - "golang.org/x/crypto/ssh" - "gotest.tools/v3/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -// TestKeys validates public/private byte slices returned by -// getKeys() are of the expected type and use the expected curve -func TestKeys(t *testing.T) { - - testKeys, err := getKeys() - assert.NilError(t, err) - - t.Run("test private key", func(t *testing.T) { - block, _ := pem.Decode(testKeys.Private) - - if assert.Check(t, block != nil) { - private, err := x509.ParseECPrivateKey(block.Bytes) - - assert.NilError(t, err) - assert.Equal(t, fmt.Sprintf("%T", private), "*ecdsa.PrivateKey") - assert.Equal(t, private.Params().BitSize, 521) - } - }) - - t.Run("test public key", func(t *testing.T) { - pub, _, _, _, err := ssh.ParseAuthorizedKey(testKeys.Public) - - assert.NilError(t, err) - assert.Equal(t, pub.Type(), "ecdsa-sha2-nistp521") - assert.Equal(t, fmt.Sprintf("%T", pub), "*ssh.ecdsaPublicKey") - }) - -} - -// TestSSHDConfiguration verifies the default SSH/SSHD configurations -// are created. 
These include the secret containing the public and private -// keys, the configmap containing the SSH client config file and SSHD -// sshd_config file, their respective contents, the project volume and -// the volume mount -func TestSSHDConfiguration(t *testing.T) { - - // set cluster name and namespace values in postgrescluster spec - postgresCluster := &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: testclustername, - Namespace: "postgres-operator-test-" + rand.String(6), - }, - } - - // the initially created configmap - var sshCMInitial v1.ConfigMap - // the returned configmap - var sshCMReturned v1.ConfigMap - // pod spec for testing projected volumes and volume mounts - pod := &v1.PodSpec{} - // initially created secret - var secretInitial v1.Secret - // returned secret - var secretReturned v1.Secret - - t.Run("ssh configmap and secret checks", func(t *testing.T) { - - // setup the test environment and ensure a clean teardown - testEnv, testClient := setupTestEnv(t) - - // define the cleanup steps to run once the tests complete - t.Cleanup(func() { - teardownTestEnv(t, testEnv) - }) - - ns := &v1.Namespace{} - ns.Name = naming.PGBackRestConfig(postgresCluster).Namespace - assert.NilError(t, testClient.Create(context.Background(), ns)) - t.Cleanup(func() { assert.Check(t, testClient.Delete(context.Background(), ns)) }) - - t.Run("create ssh configmap struct", func(t *testing.T) { - sshCMInitial = CreateSSHConfigMapIntent(postgresCluster) - - // check that there is configmap data - assert.Assert(t, sshCMInitial.Data != nil) - }) - - t.Run("create ssh secret struct", func(t *testing.T) { - - // declare this locally so ':=' operation will not result in a - // locally scoped 'secretInitial' variable - var err error - - secretInitial, err = CreateSSHSecretIntent(postgresCluster, nil, - naming.ClusterPodService(postgresCluster).Name, ns.GetName()) - - assert.NilError(t, err) - - // check that there is configmap data - assert.Assert(t, secretInitial.Data != nil) - }) - - t.Run("create ssh configmap", func(t *testing.T) { - - // create the configmap - err := testClient.Patch(context.Background(), &sshCMInitial, client.Apply, client.ForceOwnership, client.FieldOwner(testFieldOwner)) - - assert.NilError(t, err) - }) - - t.Run("create ssh secret", func(t *testing.T) { - - // create the secret - err := testClient.Patch(context.Background(), &secretInitial, client.Apply, client.ForceOwnership, client.FieldOwner(testFieldOwner)) - - assert.NilError(t, err) - }) - - t.Run("get ssh configmap", func(t *testing.T) { - - objectKey := client.ObjectKey{ - Namespace: naming.PGBackRestSSHConfig(postgresCluster).Namespace, - Name: naming.PGBackRestSSHConfig(postgresCluster).Name, - } - - err := testClient.Get(context.Background(), objectKey, &sshCMReturned) - - assert.NilError(t, err) - }) - - t.Run("get ssh secret", func(t *testing.T) { - - objectKey := client.ObjectKey{ - Namespace: naming.PGBackRestSSHSecret(postgresCluster).Namespace, - Name: naming.PGBackRestSSHSecret(postgresCluster).Name, - } - - err := testClient.Get(context.Background(), objectKey, &secretReturned) - - assert.NilError(t, err) - }) - - // finally, verify initial and returned match - assert.Assert(t, reflect.DeepEqual(sshCMInitial.Data, sshCMReturned.Data)) - assert.Assert(t, reflect.DeepEqual(secretInitial.Data, secretReturned.Data)) - - }) - - t.Run("check ssh config", func(t *testing.T) { - - assert.Equal(t, getCMData(sshCMReturned, sshConfig), - `Host * -StrictHostKeyChecking yes -IdentityFile /etc/ssh/id_ecdsa 
-Port 2022 -User postgres -`) - }) - - t.Run("check sshd config", func(t *testing.T) { - - assert.Equal(t, getCMData(sshCMReturned, sshdConfig), - `AuthorizedKeysFile /etc/ssh/id_ecdsa.pub -ForceCommand NSS_WRAPPER_SUBDIR=postgres . /opt/crunchy/bin/nss_wrapper_env.sh && $SSH_ORIGINAL_COMMAND -HostKey /etc/ssh/id_ecdsa -PasswordAuthentication no -PermitRootLogin no -PidFile /tmp/sshd.pid -Port 2022 -PubkeyAuthentication yes -StrictModes no -`) - }) - - t.Run("check sshd volume", func(t *testing.T) { - - SSHConfigVolumeAndMount(&sshCMReturned, &secretReturned, pod, "database") - - assert.Assert(t, simpleMarshalContains(&pod.Volumes, strings.TrimSpace(` - - name: sshd - projected: - sources: - - configMap: - items: - - key: ssh_config - path: ./ssh_config - - key: sshd_config - path: ./sshd_config - name: `+postgresCluster.GetName()+`-ssh-config - secret: - items: - - key: id_ecdsa - path: ./id_ecdsa - - key: id_ecdsa.pub - path: ./id_ecdsa.pub - name: `+postgresCluster.GetName()+`-ssh-config -`)+"\n")) - }) - - t.Run("check sshd volume mount", func(t *testing.T) { - - SSHConfigVolumeAndMount(&sshCMReturned, &secretReturned, pod, "database") - - container := findOrAppendContainer(&pod.Containers, "database") - - assert.Assert(t, simpleMarshalContains(container.VolumeMounts, strings.TrimSpace(` - - mountPath: /etc/ssh - name: sshd - readOnly: true - `)+"\n")) - }) -} diff --git a/internal/pgbackrest/tls-server.md b/internal/pgbackrest/tls-server.md new file mode 100644 index 0000000000..b572cc1ea4 --- /dev/null +++ b/internal/pgbackrest/tls-server.md @@ -0,0 +1,97 @@ + + +# pgBackRest TLS Server + +A handful of pgBackRest features require connectivity between `pgbackrest` processes +on different pods: + +- [dedicated repository host](https://pgbackrest.org/user-guide.html#repo-host) +- [backup from standby](https://pgbackrest.org/user-guide.html#standby-backup) + +When a PostgresCluster is configured to store backups on a PVC, the dedicated +repository host is used to make that PVC available to all PostgreSQL instances +in the cluster. Regardless of whether the repo host has a defined PVC, it +functions as the server for the pgBackRest clients that run on the Instances. + +The repository host runs a `pgbackrest` server that is secured through TLS and +[certificates][]. When performing backups, it connects to `pgbackrest` servers +running on PostgreSQL instances (as sidecars). Restore jobs connect to the +repository host to fetch files. PostgreSQL calls `pgbackrest` which connects +to the repository host to [send and receive WAL files][archiving]. + +[archiving]: https://www.postgresql.org/docs/current/continuous-archiving.html +[certificates]: certificates.md + + +The `pgbackrest` command acts as a TLS client and connects to a pgBackRest TLS +server when `pg-host-type=tls` and/or `repo-host-type=tls`. The default for these is `ssh`: + +- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L3771 +- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L6137 + + +The pgBackRest TLS server is configured through the `tls-server-*` [options](config.md). +In pgBackRest 2.38, changing any of these options or changing certificate contents +requires a reload of the server, as shown in the "Setup TLS Server" section of the +documentation, with the command configured as + +``` +ExecReload=kill -HUP $MAINPID +``` + +- https://pgbackrest.org/user-guide-rhel.html#repo-host/setup-tls + +- `tls-server-address`, `tls-server-port`
+ The network address and port on which to listen. pgBackRest 2.38 listens on + the *first* address returned by `getaddrinfo()`. There is no way to listen on + all interfaces. + + - https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/socket/server.c#L172 + - https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/socket/common.c#L87 + +- `tls-server-cert-file`, `tls-server-key-file`
+ The [certificate chain][certificates] and private key pair used to encrypt connections. + +- `tls-server-ca-file`
+ The certificate used to verify client [certificates][]. + [Required](https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L8767). + +- `tls-server-auth`
+  A map/hash/dictionary of certificate common names and the stanzas they are authorized
+  to interact with.
+  [Required](https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L8751).
+
+
+In pgBackRest 2.38, as mentioned above, sending SIGHUP causes a configuration reload.
+
+- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/server/server.c#L178
+
+```
+P00 DETAIL: configuration reload begin
+P00 INFO: server command begin 2.38...
+P00 DETAIL: configuration reload end
+```
+
+Sending SIGINT to the TLS server causes it to exit with code 63, TermError.
+
+- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/exit.c#L73-L75
+- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/exit.c#L62
+- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/error.auto.c#L48
+
+
+```
+P00 INFO: server command end: terminated on signal [SIGINT]
+```
+
+Sending SIGTERM exits the signal loop and leads to command termination.
+
+- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/server/server.c#L194
+
+
+```
+P00 INFO: server command end: completed successfully
+```
diff --git a/internal/pgbackrest/util.go b/internal/pgbackrest/util.go
index 1adbb6bfde..4fc2266c56 100644
--- a/internal/pgbackrest/util.go
+++ b/internal/pgbackrest/util.go
@@ -1,17 +1,6 @@
-/*
- Copyright 2021 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
 
 package pgbackrest
 
@@ -20,18 +9,19 @@ import (
 	"hash/fnv"
 	"io"
 
-	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/util/rand"
+
+	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )
 
 // maxPGBackrestRepos is the maximum number of repositories that can be configured according to the
 // multi-repository solution implemented within pgBackRest
 const maxPGBackrestRepos = 4
 
-// DedicatedRepoHostEnabled determines whether not a pgBackRest dedicated repository host is
-// enabled according to the provided PostgresCluster
-func DedicatedRepoHostEnabled(postgresCluster *v1beta1.PostgresCluster) bool {
+// RepoHostVolumeDefined determines whether or not at least one pgBackRest dedicated
+// repository host volume has been defined in the PostgresCluster manifest.
+func RepoHostVolumeDefined(postgresCluster *v1beta1.PostgresCluster) bool {
 	for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos {
 		if repo.Volume != nil {
 			return true
diff --git a/internal/pgbackrest/util_test.go b/internal/pgbackrest/util_test.go
index b4f1754bd7..eb0f4dec29 100644
--- a/internal/pgbackrest/util_test.go
+++ b/internal/pgbackrest/util_test.go
@@ -1,17 +1,6 @@
-/*
- Copyright 2021 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -21,9 +10,10 @@ import ( "strconv" "testing" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" "gotest.tools/v3/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestCalculateConfigHashes(t *testing.T) { diff --git a/internal/pgbouncer/assertions_test.go b/internal/pgbouncer/assertions_test.go deleted file mode 100644 index 345d8448ad..0000000000 --- a/internal/pgbouncer/assertions_test.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pgbouncer - -import ( - "gotest.tools/v3/assert/cmp" - "sigs.k8s.io/yaml" -) - -func marshalEquals(actual interface{}, expected string) cmp.Comparison { - b, err := yaml.Marshal(actual) - return func() cmp.Result { - if err != nil { - return cmp.ResultFromError(err) - } - return cmp.DeepEqual(string(b), expected)() - } -} diff --git a/internal/pgbouncer/certificates.go b/internal/pgbouncer/certificates.go index 7431f90c8f..31f91c503a 100644 --- a/internal/pgbouncer/certificates.go +++ b/internal/pgbouncer/certificates.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -20,23 +9,24 @@ import ( ) const ( - certBackendDirectory = configDirectory + "/~postgres-operator-backend" - certFrontendDirectory = configDirectory + "/~postgres-operator-frontend" + tlsAuthoritySecretKey = "ca.crt" + tlsCertificateSecretKey = corev1.TLSCertKey + tlsPrivateKeySecretKey = corev1.TLSPrivateKeyKey - certBackendAuthorityAbsolutePath = certBackendDirectory + "/" + certBackendAuthorityProjectionPath - certBackendAuthorityProjectionPath = "ca.crt" + certBackendAuthorityAbsolutePath = configDirectory + "/" + certBackendAuthorityProjectionPath + certBackendAuthorityProjectionPath = "~postgres-operator/backend-ca.crt" - certFrontendAuthorityAbsolutePath = certFrontendDirectory + "/" + certFrontendAuthorityProjectionPath - certFrontendPrivateKeyAbsolutePath = certFrontendDirectory + "/" + certFrontendPrivateKeyProjectionPath - certFrontendAbsolutePath = certFrontendDirectory + "/" + certFrontendProjectionPath + certFrontendAuthorityAbsolutePath = configDirectory + "/" + certFrontendAuthorityProjectionPath + certFrontendPrivateKeyAbsolutePath = configDirectory + "/" + certFrontendPrivateKeyProjectionPath + certFrontendAbsolutePath = configDirectory + "/" + certFrontendProjectionPath - certFrontendAuthorityProjectionPath = "ca.crt" - certFrontendPrivateKeyProjectionPath = "tls.key" - certFrontendProjectionPath = "tls.crt" + certFrontendAuthorityProjectionPath = "~postgres-operator/frontend-ca.crt" + certFrontendPrivateKeyProjectionPath = "~postgres-operator/frontend-tls.key" + certFrontendProjectionPath = "~postgres-operator/frontend-tls.crt" - certFrontendAuthoritySecretKey = "pgbouncer-frontend.ca-roots" // #nosec G101 this is a name, not a credential - certFrontendPrivateKeySecretKey = "pgbouncer-frontend.key" // #nosec G101 this is a name, not a credential - certFrontendSecretKey = "pgbouncer-frontend.crt" // #nosec G101 this is a name, not a credential + certFrontendAuthoritySecretKey = "pgbouncer-frontend.ca-roots" + certFrontendPrivateKeySecretKey = "pgbouncer-frontend.key" + certFrontendSecretKey = "pgbouncer-frontend.crt" ) // backendAuthority creates a volume projection of the PostgreSQL server @@ -46,11 +36,20 @@ func backendAuthority(postgres *corev1.SecretProjection) corev1.VolumeProjection result := postgres.DeepCopy() for i := range result.Items { - if result.Items[i].Path == certBackendAuthorityProjectionPath { + // The PostgreSQL server projection expects Path to match typical Keys. + if result.Items[i].Path == tlsAuthoritySecretKey { + result.Items[i].Path = certBackendAuthorityProjectionPath items = append(items, result.Items[i]) } } + if len(items) == 0 { + items = []corev1.KeyToPath{{ + Key: tlsAuthoritySecretKey, + Path: certBackendAuthorityProjectionPath, + }} + } + result.Items = items return corev1.VolumeProjection{Secret: result} } @@ -59,10 +58,8 @@ func backendAuthority(postgres *corev1.SecretProjection) corev1.VolumeProjection func frontendCertificate( custom *corev1.SecretProjection, secret *corev1.Secret, ) corev1.VolumeProjection { - result := custom - - if result == nil { - result = &corev1.SecretProjection{ + if custom == nil { + return corev1.VolumeProjection{Secret: &corev1.SecretProjection{ LocalObjectReference: corev1.LocalObjectReference{ Name: secret.Name, }, @@ -80,8 +77,53 @@ func frontendCertificate( Path: certFrontendProjectionPath, }, }, + }} + } + + // The custom projection may have more or less than the three items we need + // to mount. 
Search for items that have the Path we expect and mount them at + // the path we need. When no items are specified, the Key serves as the Path. + + // TODO(cbandy): A more structured field or validating webhook would ensure + // that the necessary values are specified. + + var items []corev1.KeyToPath + result := custom.DeepCopy() + + for i := range result.Items { + // The custom projection expects Path to match typical Keys. + switch result.Items[i].Path { + case tlsAuthoritySecretKey: + result.Items[i].Path = certFrontendAuthorityProjectionPath + items = append(items, result.Items[i]) + + case tlsCertificateSecretKey: + result.Items[i].Path = certFrontendProjectionPath + items = append(items, result.Items[i]) + + case tlsPrivateKeySecretKey: + result.Items[i].Path = certFrontendPrivateKeyProjectionPath + items = append(items, result.Items[i]) + } + } + + if len(items) == 0 { + items = []corev1.KeyToPath{ + { + Key: tlsAuthoritySecretKey, + Path: certFrontendAuthorityProjectionPath, + }, + { + Key: tlsPrivateKeySecretKey, + Path: certFrontendPrivateKeyProjectionPath, + }, + { + Key: tlsCertificateSecretKey, + Path: certFrontendProjectionPath, + }, } } + result.Items = items return corev1.VolumeProjection{Secret: result} } diff --git a/internal/pgbouncer/certificates_test.go b/internal/pgbouncer/certificates_test.go index cb1bbb62a1..5955c3de9c 100644 --- a/internal/pgbouncer/certificates_test.go +++ b/internal/pgbouncer/certificates_test.go @@ -1,44 +1,43 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer import ( - "strings" "testing" "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" ) func TestBackendAuthority(t *testing.T) { + // No items; assume Key matches Path. projection := &corev1.SecretProjection{ LocalObjectReference: corev1.LocalObjectReference{Name: "some-name"}, - Items: []corev1.KeyToPath{ - {Key: "some-crt-key", Path: "tls.crt"}, - {Key: "some-ca-key", Path: "ca.crt"}, - }, } + assert.Assert(t, cmp.MarshalMatches(backendAuthority(projection), ` +secret: + items: + - key: ca.crt + path: ~postgres-operator/backend-ca.crt + name: some-name + `)) - assert.Assert(t, marshalEquals(backendAuthority(projection), strings.Trim(` + // Some items; use only the CA Path. 
+ projection.Items = []corev1.KeyToPath{ + {Key: "some-crt-key", Path: "tls.crt"}, + {Key: "some-ca-key", Path: "ca.crt"}, + } + assert.Assert(t, cmp.MarshalMatches(backendAuthority(projection), ` secret: items: - key: some-ca-key - path: ca.crt + path: ~postgres-operator/backend-ca.crt name: some-name - `, "\t\n")+"\n")) + `)) } func TestFrontendCertificate(t *testing.T) { @@ -46,32 +45,53 @@ func TestFrontendCertificate(t *testing.T) { secret.Name = "op-secret" t.Run("Generated", func(t *testing.T) { - assert.Assert(t, marshalEquals(frontendCertificate(nil, secret), strings.Trim(` + assert.Assert(t, cmp.MarshalMatches(frontendCertificate(nil, secret), ` secret: items: - key: pgbouncer-frontend.ca-roots - path: ca.crt + path: ~postgres-operator/frontend-ca.crt - key: pgbouncer-frontend.key - path: tls.key + path: ~postgres-operator/frontend-tls.key - key: pgbouncer-frontend.crt - path: tls.crt + path: ~postgres-operator/frontend-tls.crt name: op-secret - `, "\t\n")+"\n")) + `)) }) t.Run("Custom", func(t *testing.T) { custom := new(corev1.SecretProjection) custom.Name = "some-other" + + // No items; assume Key matches Path. + assert.Assert(t, cmp.MarshalMatches(frontendCertificate(custom, secret), ` +secret: + items: + - key: ca.crt + path: ~postgres-operator/frontend-ca.crt + - key: tls.key + path: ~postgres-operator/frontend-tls.key + - key: tls.crt + path: ~postgres-operator/frontend-tls.crt + name: some-other + `)) + + // Some items; use only the TLS Paths. custom.Items = []corev1.KeyToPath{ {Key: "any", Path: "thing"}, + {Key: "some-ca-key", Path: "ca.crt"}, + {Key: "some-cert-key", Path: "tls.crt"}, + {Key: "some-key-key", Path: "tls.key"}, } - - assert.Assert(t, marshalEquals(frontendCertificate(custom, secret), strings.Trim(` + assert.Assert(t, cmp.MarshalMatches(frontendCertificate(custom, secret), ` secret: items: - - key: any - path: thing + - key: some-ca-key + path: ~postgres-operator/frontend-ca.crt + - key: some-cert-key + path: ~postgres-operator/frontend-tls.crt + - key: some-key-key + path: ~postgres-operator/frontend-tls.key name: some-other - `, "\t\n")+"\n")) + `)) }) } diff --git a/internal/pgbouncer/config.go b/internal/pgbouncer/config.go index 7eac3f9a48..a203144817 100644 --- a/internal/pgbouncer/config.go +++ b/internal/pgbouncer/config.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -166,26 +155,14 @@ func clusterINI(cluster *v1beta1.PostgresCluster) string { users := iniValueSet(cluster.Spec.Proxy.PGBouncer.Config.Users) - // First, include any custom configuration file with verbosity turned up. - // PgBouncer will log a DEBUG message before it processes each line of that - // file, providing context when an "%include" is wrong. - // - https://github.com/pgbouncer/pgbouncer/issues/584 + // Include any custom configuration file, then apply global settings, then + // pool definitions. 
result := iniGeneratedWarning + "\n[pgbouncer]" + - "\nverbose = 1" + - "\n%include " + emptyFileAbsolutePath - - // Next, apply global settings with verbosity restored. - verbose := global["verbose"] - delete(global, "verbose") - if len(verbose) == 0 { - verbose = "0" - } - result += "\n\n[pgbouncer]\n" + - iniValueSet{"verbose": verbose}.String() + "\n" + global.String() + "\n%include " + emptyFileAbsolutePath + + "\n\n[pgbouncer]\n" + global.String() + + "\n[databases]\n" + databases.String() - // Finally, apply pool definitions. - result += "\n[databases]\n" + databases.String() if len(users) > 0 { result += "\n[users]\n" + users.String() } @@ -255,16 +232,19 @@ func reloadCommand(name string) []string { // Use a Bash loop to periodically check the mtime of the mounted // configuration volume. When it changes, signal PgBouncer and print the // observed timestamp. - // NOTE(cbandy): Using `sleep & wait` below used over 75Mi of memory on - // OpenShift 4.7.2. + // + // Coreutils `sleep` uses a lot of memory, so the following opens a file + // descriptor and uses the timeout of the builtin `read` to wait. That same + // descriptor gets closed and reopened to use the builtin `[ -nt` to check + // mtimes. + // - https://unix.stackexchange.com/a/407383 const script = ` -declare -r directory="${directory:-$1}" -while sleep 5s; do - mounted=$(stat --format=%y "${directory}") - if [ "${mounted}" != "${loaded-}" ] && pkill --signal HUP --exact pgbouncer +exec {fd}<> <(:||:) +while read -r -t 5 -u "${fd}" ||:; do + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact pgbouncer then - loaded="${mounted}" - echo Loaded configuration dated "${loaded}" + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded configuration dated %y' "${directory}" fi done ` diff --git a/internal/pgbouncer/config.md b/internal/pgbouncer/config.md index b3f09fd246..abfec12518 100644 --- a/internal/pgbouncer/config.md +++ b/internal/pgbouncer/config.md @@ -1,16 +1,7 @@ PgBouncer is configured through INI files. It will reload these files when diff --git a/internal/pgbouncer/config_test.go b/internal/pgbouncer/config_test.go index 2e582dc21f..7a96da571c 100644 --- a/internal/pgbouncer/config_test.go +++ b/internal/pgbouncer/config_test.go @@ -1,22 +1,11 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer import ( - "io/ioutil" + "os" "os/exec" "path/filepath" "strings" @@ -27,6 +16,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -68,24 +59,21 @@ func TestClusterINI(t *testing.T) { # Your changes will not be saved. 
[pgbouncer] -verbose = 1 %include /etc/pgbouncer/pgbouncer.ini [pgbouncer] -verbose = 0 - auth_file = /etc/pgbouncer/~postgres-operator/users.txt auth_query = SELECT username, password from pgbouncer.get_auth($1) auth_user = _crunchypgbouncer -client_tls_ca_file = /etc/pgbouncer/~postgres-operator-frontend/ca.crt -client_tls_cert_file = /etc/pgbouncer/~postgres-operator-frontend/tls.crt -client_tls_key_file = /etc/pgbouncer/~postgres-operator-frontend/tls.key +client_tls_ca_file = /etc/pgbouncer/~postgres-operator/frontend-ca.crt +client_tls_cert_file = /etc/pgbouncer/~postgres-operator/frontend-tls.crt +client_tls_key_file = /etc/pgbouncer/~postgres-operator/frontend-tls.key client_tls_sslmode = require conffile = /etc/pgbouncer/~postgres-operator.ini ignore_startup_parameters = extra_float_digits listen_addr = * listen_port = 8888 -server_tls_ca_file = /etc/pgbouncer/~postgres-operator-backend/ca.crt +server_tls_ca_file = /etc/pgbouncer/~postgres-operator/backend-ca.crt server_tls_sslmode = verify-full unix_socket_dir = @@ -111,26 +99,24 @@ unix_socket_dir = # Your changes will not be saved. [pgbouncer] -verbose = 1 %include /etc/pgbouncer/pgbouncer.ini [pgbouncer] -verbose = whomp - auth_file = /etc/pgbouncer/~postgres-operator/users.txt auth_query = SELECT username, password from pgbouncer.get_auth($1) auth_user = _crunchypgbouncer -client_tls_ca_file = /etc/pgbouncer/~postgres-operator-frontend/ca.crt -client_tls_cert_file = /etc/pgbouncer/~postgres-operator-frontend/tls.crt -client_tls_key_file = /etc/pgbouncer/~postgres-operator-frontend/tls.key +client_tls_ca_file = /etc/pgbouncer/~postgres-operator/frontend-ca.crt +client_tls_cert_file = /etc/pgbouncer/~postgres-operator/frontend-tls.crt +client_tls_key_file = /etc/pgbouncer/~postgres-operator/frontend-tls.key client_tls_sslmode = require conffile = /etc/pgbouncer/~postgres-operator.ini ignore_startup_parameters = custom listen_addr = * listen_port = 8888 -server_tls_ca_file = /etc/pgbouncer/~postgres-operator-backend/ca.crt +server_tls_ca_file = /etc/pgbouncer/~postgres-operator/backend-ca.crt server_tls_sslmode = verify-full unix_socket_dir = +verbose = whomp [databases] appdb = conn=str @@ -154,7 +140,7 @@ func TestPodConfigFiles(t *testing.T) { t.Run("Default", func(t *testing.T) { projections := podConfigFiles(config, configmap, secret) - assert.Assert(t, marshalEquals(projections, strings.Trim(` + assert.Assert(t, cmp.MarshalMatches(projections, ` - configMap: items: - key: pgbouncer-empty @@ -170,7 +156,7 @@ func TestPodConfigFiles(t *testing.T) { - key: pgbouncer-users.txt path: ~postgres-operator/users.txt name: some-shh - `, "\t\n")+"\n")) + `)) }) t.Run("CustomFiles", func(t *testing.T) { @@ -187,7 +173,7 @@ func TestPodConfigFiles(t *testing.T) { } projections := podConfigFiles(config, configmap, secret) - assert.Assert(t, marshalEquals(projections, strings.Trim(` + assert.Assert(t, cmp.MarshalMatches(projections, ` - configMap: items: - key: pgbouncer-empty @@ -210,20 +196,12 @@ func TestPodConfigFiles(t *testing.T) { - key: pgbouncer-users.txt path: ~postgres-operator/users.txt name: some-shh - `, "\t\n")+"\n")) + `)) }) } func TestReloadCommand(t *testing.T) { - shellcheck, err := exec.LookPath("shellcheck") - if err != nil { - t.Skip(`requires "shellcheck" executable`) - } else { - output, err := exec.Command(shellcheck, "--version").CombinedOutput() - assert.NilError(t, err) - t.Logf("using %q:\n%s", shellcheck, output) - } - + shellcheck := require.ShellCheck(t) command := reloadCommand("some-name") // Expect a 
bash command with an inline script. @@ -233,7 +211,7 @@ func TestReloadCommand(t *testing.T) { // Write out that inline script. dir := t.TempDir() file := filepath.Join(dir, "script.bash") - assert.NilError(t, ioutil.WriteFile(file, []byte(command[3]), 0o600)) + assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) // Expect shellcheck to be happy. cmd := exec.Command(shellcheck, "--enable=all", file) diff --git a/internal/pgbouncer/postgres.go b/internal/pgbouncer/postgres.go index d17a45cc1d..cbc2e29916 100644 --- a/internal/pgbouncer/postgres.go +++ b/internal/pgbouncer/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -35,16 +24,6 @@ const ( // - https://github.com/pgbouncer/pgbouncer/issues/568 // - https://github.com/pgbouncer/pgbouncer/issues/302#issuecomment-815097248 postgresqlUser = "_crunchypgbouncer" - - // sqlCurrentAndFutureDatabases returns all the database names where - // PgBouncer should be enabled or disabled. It includes the "template1" - // database so that PgBouncer is automatically enabled in future databases. - // The "template0" database is explicitly excluded to ensure it is never - // manipulated. - // - https://www.postgresql.org/docs/current/managing-databases.html - sqlCurrentAndFutureDatabases = "" + - `SELECT datname FROM pg_catalog.pg_database` + - ` WHERE datallowconn AND datname NOT IN ('template0')` ) // sqlAuthenticationQuery returns the SECURITY DEFINER function that allows @@ -83,8 +62,7 @@ func DisableInPostgreSQL(ctx context.Context, exec postgres.Executor) error { // First, remove PgBouncer objects from all databases and database templates. // The PgBouncer user is removed later. - stdout, stderr, err := exec.ExecInDatabasesFromQuery(ctx, - sqlCurrentAndFutureDatabases, + stdout, stderr, err := exec.ExecInAllDatabases(ctx, strings.Join([]string{ // Quiet NOTICE messages from IF EXISTS statements. // - https://www.postgresql.org/docs/current/runtime-config-client.html @@ -146,8 +124,7 @@ func EnableInPostgreSQL( ) error { log := logging.FromContext(ctx) - stdout, stderr, err := exec.ExecInDatabasesFromQuery(ctx, - sqlCurrentAndFutureDatabases, + stdout, stderr, err := exec.ExecInAllDatabases(ctx, strings.Join([]string{ // Quiet NOTICE messages from IF NOT EXISTS statements. 
// - https://www.postgresql.org/docs/current/runtime-config-client.html @@ -226,7 +203,7 @@ func generatePassword() (plaintext, verifier string, err error) { // - https://www.pgbouncer.org/config.html#authentication-file-format // - https://github.com/pgbouncer/pgbouncer/issues/508#issuecomment-713339834 - plaintext, err = util.GeneratePassword(32) + plaintext, err = util.GenerateASCIIPassword(32) if err == nil { verifier, err = password.NewSCRAMPassword(plaintext).Build() } diff --git a/internal/pgbouncer/postgres_test.go b/internal/pgbouncer/postgres_test.go index 043f7fa582..f2ce419753 100644 --- a/internal/pgbouncer/postgres_test.go +++ b/internal/pgbouncer/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -19,7 +8,6 @@ import ( "context" "errors" "io" - "io/ioutil" "strings" "testing" @@ -53,11 +41,11 @@ func TestDisableInPostgreSQL(t *testing.T) { ) error { assert.Assert(t, stdout != nil, "should capture stdout") assert.Assert(t, stderr != nil, "should capture stderr") - gomega.NewWithT(t).Expect(command).To(gomega.ContainElement( - `SELECT datname FROM pg_catalog.pg_database WHERE datallowconn AND datname NOT IN ('template0')`, + assert.Assert(t, strings.Contains(strings.Join(command, "\n"), + `SELECT datname FROM pg_catalog.pg_database`, ), "expected all databases and templates") - b, err := ioutil.ReadAll(stdin) + b, err := io.ReadAll(stdin) assert.NilError(t, err) assert.Equal(t, string(b), strings.TrimSpace(` SET client_min_messages = WARNING; @@ -100,7 +88,7 @@ COMMIT;`)) `SELECT pg_catalog.current_database()`, ), "expected the default database") - b, err := ioutil.ReadAll(stdin) + b, err := io.ReadAll(stdin) assert.NilError(t, err) assert.Equal(t, string(b), `SET client_min_messages = WARNING; DROP ROLE IF EXISTS :"username";`) gomega.NewWithT(t).Expect(command).To(gomega.ContainElements( @@ -139,11 +127,11 @@ func TestEnableInPostgreSQL(t *testing.T) { ) error { assert.Assert(t, stdout != nil, "should capture stdout") assert.Assert(t, stderr != nil, "should capture stderr") - gomega.NewWithT(t).Expect(command).To(gomega.ContainElement( - `SELECT datname FROM pg_catalog.pg_database WHERE datallowconn AND datname NOT IN ('template0')`, + assert.Assert(t, strings.Contains(strings.Join(command, "\n"), + `SELECT datname FROM pg_catalog.pg_database`, ), "expected all databases and templates") - b, err := ioutil.ReadAll(stdin) + b, err := io.ReadAll(stdin) assert.NilError(t, err) assert.Equal(t, string(b), strings.TrimSpace(` SET client_min_messages = WARNING; diff --git a/internal/pgbouncer/reconcile.go b/internal/pgbouncer/reconcile.go index c351f8fcbb..999d6524a5 100644 --- a/internal/pgbouncer/reconcile.go +++ b/internal/pgbouncer/reconcile.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -23,6 +12,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pki" @@ -40,7 +30,7 @@ func ConfigMap( return } - initialize.StringMap(&outConfigMap.Data) + initialize.Map(&outConfigMap.Data) outConfigMap.Data[emptyConfigMapKey] = "" outConfigMap.Data[iniFileConfigMapKey] = clusterINI(inCluster) @@ -60,7 +50,7 @@ func Secret(ctx context.Context, } var err error - initialize.ByteMap(&outSecret.Data) + initialize.Map(&outSecret.Data) // Use the existing password and verifier. Generate both when either is missing. // NOTE(cbandy): We don't have a function to compare a plaintext password @@ -82,21 +72,19 @@ func Secret(ctx context.Context, } if inCluster.Spec.Proxy.PGBouncer.CustomTLSSecret == nil { - leaf := pki.NewLeafCertificate("", nil, nil) - leaf.DNSNames = naming.ServiceDNSNames(ctx, inService) - leaf.CommonName = leaf.DNSNames[0] // FQDN + leaf := &pki.LeafCertificate{} + dnsNames := naming.ServiceDNSNames(ctx, inService) + dnsFQDN := dnsNames[0] if err == nil { - var parse error - if data, ok := inSecret.Data[certFrontendSecretKey]; parse == nil && ok { - leaf.Certificate, parse = pki.ParseCertificate(data) - } - if data, ok := inSecret.Data[certFrontendPrivateKeySecretKey]; parse == nil && ok { - leaf.PrivateKey, parse = pki.ParsePrivateKey(data) - } - if parse != nil || pki.LeafCertIsBad(ctx, leaf, inRoot, inCluster.Namespace) { - err = errors.WithStack(leaf.Generate(inRoot)) - } + // Unmarshal and validate the stored leaf. These first errors can + // be ignored because they result in an invalid leaf which is then + // correctly regenerated. + _ = leaf.Certificate.UnmarshalText(inSecret.Data[certFrontendSecretKey]) + _ = leaf.PrivateKey.UnmarshalText(inSecret.Data[certFrontendPrivateKeySecretKey]) + + leaf, err = inRoot.RegenerateLeafWhenNecessary(leaf, dnsFQDN, dnsNames) + err = errors.WithStack(err) } if err == nil { @@ -115,6 +103,7 @@ func Secret(ctx context.Context, // Pod populates a PodSpec with the container and volumes needed to run PgBouncer. 
func Pod( + ctx context.Context, inCluster *v1beta1.PostgresCluster, inConfigMap *corev1.ConfigMap, inPostgreSQLCertificate *corev1.SecretProjection, @@ -126,34 +115,25 @@ func Pod( return } - backend := corev1.Volume{Name: "pgbouncer-backend-tls"} - backend.Projected = &corev1.ProjectedVolumeSource{ - Sources: []corev1.VolumeProjection{ - backendAuthority(inPostgreSQLCertificate), - }, - } - - frontend := corev1.Volume{Name: "pgbouncer-frontend-tls"} - frontend.Projected = &corev1.ProjectedVolumeSource{ - Sources: []corev1.VolumeProjection{ - frontendCertificate( - inCluster.Spec.Proxy.PGBouncer.CustomTLSSecret, inSecret), - }, + configVolumeMount := corev1.VolumeMount{ + Name: "pgbouncer-config", MountPath: configDirectory, ReadOnly: true, } - - configVol := corev1.Volume{Name: "pgbouncer-config"} - configVol.Projected = &corev1.ProjectedVolumeSource{ - Sources: podConfigFiles( - inCluster.Spec.Proxy.PGBouncer.Config, inConfigMap, inSecret), + configVolume := corev1.Volume{Name: configVolumeMount.Name} + configVolume.Projected = &corev1.ProjectedVolumeSource{ + Sources: append(append([]corev1.VolumeProjection{}, + podConfigFiles(inCluster.Spec.Proxy.PGBouncer.Config, inConfigMap, inSecret)...), + frontendCertificate(inCluster.Spec.Proxy.PGBouncer.CustomTLSSecret, inSecret), + backendAuthority(inPostgreSQLCertificate), + ), } container := corev1.Container{ Name: naming.ContainerPGBouncer, - Command: []string{"pgbouncer", iniFileAbsolutePath}, - Image: config.PGBouncerContainerImage(inCluster), - Resources: inCluster.Spec.Proxy.PGBouncer.Resources, - + Command: []string{"pgbouncer", iniFileAbsolutePath}, + Image: config.PGBouncerContainerImage(inCluster), + ImagePullPolicy: inCluster.Spec.ImagePullPolicy, + Resources: inCluster.Spec.Proxy.PGBouncer.Resources, SecurityContext: initialize.RestrictedSecurityContext(), Ports: []corev1.ContainerPort{{ @@ -161,24 +141,8 @@ func Pod( ContainerPort: *inCluster.Spec.Proxy.PGBouncer.Port, Protocol: corev1.ProtocolTCP, }}, - } - container.VolumeMounts = []corev1.VolumeMount{ - { - Name: configVol.Name, - MountPath: configDirectory, - ReadOnly: true, - }, - { - Name: backend.Name, - MountPath: certBackendDirectory, - ReadOnly: true, - }, - { - Name: frontend.Name, - MountPath: certFrontendDirectory, - ReadOnly: true, - }, + VolumeMounts: []corev1.VolumeMount{configVolumeMount}, } // TODO container.LivenessProbe? @@ -187,16 +151,12 @@ func Pod( reloader := corev1.Container{ Name: naming.ContainerPGBouncerConfig, - Command: reloadCommand(naming.ContainerPGBouncerConfig), - Image: config.PGBouncerContainerImage(inCluster), - + Command: reloadCommand(naming.ContainerPGBouncerConfig), + Image: container.Image, + ImagePullPolicy: container.ImagePullPolicy, SecurityContext: initialize.RestrictedSecurityContext(), - VolumeMounts: []corev1.VolumeMount{{ - Name: configVol.Name, - MountPath: configDirectory, - ReadOnly: true, - }}, + VolumeMounts: []corev1.VolumeMount{configVolumeMount}, } // Let the PgBouncer container drive the QoS of the pod. Set resources only @@ -210,9 +170,23 @@ func Pod( } } + // When resources are explicitly set, overwrite the above. 
+ if inCluster.Spec.Proxy.PGBouncer.Sidecars != nil && + inCluster.Spec.Proxy.PGBouncer.Sidecars.PGBouncerConfig != nil && + inCluster.Spec.Proxy.PGBouncer.Sidecars.PGBouncerConfig.Resources != nil { + reloader.Resources = *inCluster.Spec.Proxy.PGBouncer.Sidecars.PGBouncerConfig.Resources + } + outPod.Containers = []corev1.Container{container, reloader} - outPod.Volumes = []corev1.Volume{backend, configVol, frontend} + // If the PGBouncerSidecars feature gate is enabled and custom pgBouncer + // sidecars are defined, add the defined container to the Pod. + if feature.Enabled(ctx, feature.PGBouncerSidecars) && + inCluster.Spec.Proxy.PGBouncer.Containers != nil { + outPod.Containers = append(outPod.Containers, inCluster.Spec.Proxy.PGBouncer.Containers...) + } + + outPod.Volumes = []corev1.Volume{configVolume} } // PostgreSQL populates outHBAs with any records needed to run PgBouncer. diff --git a/internal/pgbouncer/reconcile_test.go b/internal/pgbouncer/reconcile_test.go index 9b45bfa6a6..a53de8cf64 100644 --- a/internal/pgbouncer/reconcile_test.go +++ b/internal/pgbouncer/reconcile_test.go @@ -1,32 +1,22 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer import ( "context" - "strings" "testing" - "github.com/google/go-cmp/cmp" + gocmp "github.com/google/go-cmp/cmp" "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -68,8 +58,8 @@ func TestSecret(t *testing.T) { existing := new(corev1.Secret) intent := new(corev1.Secret) - root := pki.NewRootCertificateAuthority() - assert.NilError(t, root.Generate()) + root, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) t.Run("Disabled", func(t *testing.T) { // Nothing happens when PgBouncer is disabled. 
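[Editor's note] For orientation, a minimal sketch of how a caller could exercise the new sidecar path in Pod() above. It assumes a cluster whose spec.proxy.pgBouncer is already populated; the sidecar name is illustrative and error handling is elided. It only uses identifiers that appear in this diff (feature.NewGate, feature.NewContext, feature.PGBouncerSidecars, Pod).

// Sketch only: enable the PGBouncerSidecars feature gate and attach it to the context.
gate := feature.NewGate()
_ = gate.SetFromMap(map[string]bool{feature.PGBouncerSidecars: true})
ctx := feature.NewContext(context.Background(), gate)

// Declare a custom sidecar on the spec; the name is illustrative only.
cluster.Spec.Proxy.PGBouncer.Containers = []corev1.Container{
	{Name: "custom-sidecar"},
}

// Pod appends the custom container after the pgbouncer and pgbouncer-config containers.
pod := new(corev1.PodSpec)
Pod(ctx, cluster, configMap, primaryCertificate, secret, pod)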
@@ -103,13 +93,16 @@ func TestSecret(t *testing.T) { func TestPod(t *testing.T) { t.Parallel() + features := feature.NewGate() + ctx := feature.NewContext(context.Background(), features) + cluster := new(v1beta1.PostgresCluster) configMap := new(corev1.ConfigMap) primaryCertificate := new(corev1.SecretProjection) secret := new(corev1.Secret) pod := new(corev1.PodSpec) - call := func() { Pod(cluster, configMap, primaryCertificate, secret, pod) } + call := func() { Pod(ctx, cluster, configMap, primaryCertificate, secret, pod) } t.Run("Disabled", func(t *testing.T) { before := pod.DeepCopy() @@ -126,7 +119,7 @@ func TestPod(t *testing.T) { call() - assert.Assert(t, marshalEquals(pod, strings.Trim(` + assert.Assert(t, cmp.MarshalMatches(pod, ` containers: - command: - pgbouncer @@ -139,32 +132,30 @@ containers: resources: {} securityContext: allowPrivilegeEscalation: false + capabilities: + drop: + - ALL privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbouncer name: pgbouncer-config readOnly: true - - mountPath: /etc/pgbouncer/~postgres-operator-backend - name: pgbouncer-backend-tls - readOnly: true - - mountPath: /etc/pgbouncer/~postgres-operator-frontend - name: pgbouncer-frontend-tls - readOnly: true - command: - bash - -ceu - -- - |- monitor() { - declare -r directory="${directory:-$1}" - while sleep 5s; do - mounted=$(stat --format=%y "${directory}") - if [ "${mounted}" != "${loaded-}" ] && pkill --signal HUP --exact pgbouncer + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact pgbouncer then - loaded="${mounted}" - echo Loaded configuration dated "${loaded}" + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded configuration dated %y' "${directory}" fi done }; export directory="$1"; export -f monitor; exec -a "$0" bash -ceu monitor @@ -174,18 +165,19 @@ containers: resources: {} securityContext: allowPrivilegeEscalation: false + capabilities: + drop: + - ALL privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbouncer name: pgbouncer-config readOnly: true volumes: -- name: pgbouncer-backend-tls - projected: - sources: - - secret: {} - name: pgbouncer-config projected: sources: @@ -201,18 +193,19 @@ volumes: items: - key: pgbouncer-users.txt path: ~postgres-operator/users.txt -- name: pgbouncer-frontend-tls - projected: - sources: - secret: items: - key: pgbouncer-frontend.ca-roots - path: ca.crt + path: ~postgres-operator/frontend-ca.crt - key: pgbouncer-frontend.key - path: tls.key + path: ~postgres-operator/frontend-tls.key - key: pgbouncer-frontend.crt - path: tls.crt - `, "\t\n")+"\n")) + path: ~postgres-operator/frontend-tls.crt + - secret: + items: + - key: ca.crt + path: ~postgres-operator/backend-ca.crt + `)) // No change when called again. 
before := pod.DeepCopy() @@ -221,6 +214,7 @@ volumes: }) t.Run("Customizations", func(t *testing.T) { + cluster.Spec.ImagePullPolicy = corev1.PullAlways cluster.Spec.Proxy.PGBouncer.Image = "image-town" cluster.Spec.Proxy.PGBouncer.Resources.Requests = corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("100m"), @@ -228,19 +222,20 @@ volumes: cluster.Spec.Proxy.PGBouncer.CustomTLSSecret = &corev1.SecretProjection{ LocalObjectReference: corev1.LocalObjectReference{Name: "tls-name"}, Items: []corev1.KeyToPath{ - {Key: "k1", Path: "p1"}, + {Key: "k1", Path: "tls.crt"}, + {Key: "k2", Path: "tls.key"}, }, } call() - assert.Assert(t, marshalEquals(pod, - strings.Trim(` + assert.Assert(t, cmp.MarshalMatches(pod, ` containers: - command: - pgbouncer - /etc/pgbouncer/~postgres-operator.ini image: image-town + imagePullPolicy: Always name: pgbouncer ports: - containerPort: 5432 @@ -251,38 +246,37 @@ containers: cpu: 100m securityContext: allowPrivilegeEscalation: false + capabilities: + drop: + - ALL privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbouncer name: pgbouncer-config readOnly: true - - mountPath: /etc/pgbouncer/~postgres-operator-backend - name: pgbouncer-backend-tls - readOnly: true - - mountPath: /etc/pgbouncer/~postgres-operator-frontend - name: pgbouncer-frontend-tls - readOnly: true - command: - bash - -ceu - -- - |- monitor() { - declare -r directory="${directory:-$1}" - while sleep 5s; do - mounted=$(stat --format=%y "${directory}") - if [ "${mounted}" != "${loaded-}" ] && pkill --signal HUP --exact pgbouncer + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact pgbouncer then - loaded="${mounted}" - echo Loaded configuration dated "${loaded}" + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded configuration dated %y' "${directory}" fi done }; export directory="$1"; export -f monitor; exec -a "$0" bash -ceu monitor - pgbouncer-config - /etc/pgbouncer image: image-town + imagePullPolicy: Always name: pgbouncer-config resources: limits: @@ -290,18 +284,19 @@ containers: memory: 16Mi securityContext: allowPrivilegeEscalation: false + capabilities: + drop: + - ALL privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbouncer name: pgbouncer-config readOnly: true volumes: -- name: pgbouncer-backend-tls - projected: - sources: - - secret: {} - name: pgbouncer-config projected: sources: @@ -317,15 +312,157 @@ volumes: items: - key: pgbouncer-users.txt path: ~postgres-operator/users.txt -- name: pgbouncer-frontend-tls + - secret: + items: + - key: k1 + path: ~postgres-operator/frontend-tls.crt + - key: k2 + path: ~postgres-operator/frontend-tls.key + name: tls-name + - secret: + items: + - key: ca.crt + path: ~postgres-operator/backend-ca.crt + `)) + }) + + t.Run("Sidecar customization", func(t *testing.T) { + cluster.Spec.Proxy.PGBouncer.Sidecars = &v1beta1.PGBouncerSidecars{ + PGBouncerConfig: &v1beta1.Sidecar{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + }, + }, + }, + } + + call() + + assert.Assert(t, cmp.MarshalMatches(pod, ` +containers: +- command: + - pgbouncer + - /etc/pgbouncer/~postgres-operator.ini + image: image-town + imagePullPolicy: Always + name: pgbouncer + ports: + - containerPort: 5432 + name: pgbouncer + 
protocol: TCP + resources: + requests: + cpu: 100m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbouncer + name: pgbouncer-config + readOnly: true +- command: + - bash + - -ceu + - -- + - |- + monitor() { + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact pgbouncer + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded configuration dated %y' "${directory}" + fi + done + }; export directory="$1"; export -f monitor; exec -a "$0" bash -ceu monitor + - pgbouncer-config + - /etc/pgbouncer + image: image-town + imagePullPolicy: Always + name: pgbouncer-config + resources: + requests: + cpu: 200m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbouncer + name: pgbouncer-config + readOnly: true +volumes: +- name: pgbouncer-config projected: sources: + - configMap: + items: + - key: pgbouncer-empty + path: pgbouncer.ini + - configMap: + items: + - key: pgbouncer.ini + path: ~postgres-operator.ini + - secret: + items: + - key: pgbouncer-users.txt + path: ~postgres-operator/users.txt - secret: items: - key: k1 - path: p1 + path: ~postgres-operator/frontend-tls.crt + - key: k2 + path: ~postgres-operator/frontend-tls.key name: tls-name - `, "\t\n")+"\n")) + - secret: + items: + - key: ca.crt + path: ~postgres-operator/backend-ca.crt + `)) + }) + + t.Run("WithCustomSidecarContainer", func(t *testing.T) { + cluster.Spec.Proxy.PGBouncer.Containers = []corev1.Container{ + {Name: "customsidecar1"}, + } + + t.Run("SidecarNotEnabled", func(t *testing.T) { + + call() + assert.Equal(t, len(pod.Containers), 2, "expected 2 containers in Pod, got %d", len(pod.Containers)) + }) + + t.Run("SidecarEnabled", func(t *testing.T) { + assert.NilError(t, features.SetFromMap(map[string]bool{ + feature.PGBouncerSidecars: true, + })) + call() + + assert.Equal(t, len(pod.Containers), 3, "expected 3 containers in Pod, got %d", len(pod.Containers)) + + var found bool + for i := range pod.Containers { + if pod.Containers[i].Name == "customsidecar1" { + found = true + break + } + } + assert.Assert(t, found, "expected custom sidecar 'customsidecar1', but container not found") + }) }) } @@ -354,6 +491,6 @@ func TestPostgreSQL(t *testing.T) { Mandatory: postgresqlHBAs(), }, // postgres.HostBasedAuthentication has unexported fields. Call String() to compare. - cmp.Transformer("", postgres.HostBasedAuthentication.String)) + gocmp.Transformer("", postgres.HostBasedAuthentication.String)) }) } diff --git a/internal/pgmonitor/api.go b/internal/pgmonitor/api.go deleted file mode 100644 index 012053a6bd..0000000000 --- a/internal/pgmonitor/api.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pgmonitor - -import ( - "bytes" - "context" - "fmt" - "io" - "strings" - - "github.com/crunchydata/postgres-operator/internal/logging" -) - -type Executor func( - ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, -) error - -// GetExporterSQL takes the PostgreSQL version and returns the corresponding -// setup.sql file that is defined in the exporter container -func (exec Executor) GetExporterSetupSQL(ctx context.Context, version int) (string, string, error) { - log := logging.FromContext(ctx) - - var stdout, stderr bytes.Buffer - var sql string - err := exec(ctx, nil, &stdout, &stderr, - []string{"cat", fmt.Sprintf("/opt/cpm/conf/pg%d/setup.sql", version)}...) - - log.V(1).Info("sql received from exporter", "stdout", stdout.String(), "stderr", stderr.String()) - - if err == nil { - // TODO: Revisit how pgbackrest_info.sh is used with pgMonitor. - // pgMonitor queries expect a path to a script that runs pgBackRest - // info and provides json output. In the queries yaml for pgBackRest - // the default path is `/usr/bin/pgbackrest-info.sh`. We update - // the path to point to the script in our database image. - sql = strings.ReplaceAll(stdout.String(), - "/usr/bin/pgbackrest-info.sh", - "/opt/crunchy/bin/postgres-ha/pgbackrest/pgbackrest_info.sh") - } - - log.V(1).Info("updated pgMonitor default configration", "sql", sql) - - return sql, stderr.String(), err -} diff --git a/internal/pgmonitor/api_test.go b/internal/pgmonitor/api_test.go deleted file mode 100644 index 2ecdd34f4a..0000000000 --- a/internal/pgmonitor/api_test.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package pgmonitor - -import ( - "context" - "errors" - "fmt" - "io" - "strings" - "testing" - - "gotest.tools/v3/assert" -) - -func TestExecutorGetExporterSetupSQL(t *testing.T) { - t.Run("Arguments", func(t *testing.T) { - version := 12 - called := false - exec := func( - ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, - ) error { - called = true - assert.DeepEqual(t, command, strings.Fields( - fmt.Sprintf("cat /opt/cpm/conf/pg%d/setup.sql", version), - )) - - assert.Assert(t, stdin == nil, "expected no stdin, got %T", stdin) - assert.Assert(t, stderr != nil, "should capture stderr") - assert.Assert(t, stdout != nil, "should capture stdout") - return nil - } - - _, _, _ = Executor(exec).GetExporterSetupSQL(context.Background(), version) - assert.Assert(t, called) - }) - - t.Run("Error", func(t *testing.T) { - expected := errors.New("boom") - _, _, actual := Executor(func( - context.Context, io.Reader, io.Writer, io.Writer, ...string) error { - return expected - }).GetExporterSetupSQL(context.Background(), 0) - - assert.Equal(t, expected, actual) - }) - - t.Run("Result", func(t *testing.T) { - stdout, _, _ := Executor(func( - _ context.Context, _ io.Reader, stdout, stderr io.Writer, _ ...string) error { - _, _ = stdout.Write([]byte("")) - return nil - }).GetExporterSetupSQL(context.Background(), 0) - assert.Assert(t, stdout == "") - - stdout, _, _ = Executor(func( - _ context.Context, _ io.Reader, stdout, stderr io.Writer, _ ...string) error { - _, _ = stdout.Write([]byte("something")) - return nil - }).GetExporterSetupSQL(context.Background(), 0) - assert.Assert(t, stdout != "") - - _, stderr, _ := Executor(func( - _ context.Context, _ io.Reader, stdout, stderr io.Writer, _ ...string) error { - _, _ = stderr.Write([]byte("")) - return nil - }).GetExporterSetupSQL(context.Background(), 0) - - assert.Assert(t, stderr == "") - - _, stderr, _ = Executor(func( - _ context.Context, _ io.Reader, stdout, stderr io.Writer, _ ...string) error { - _, _ = stderr.Write([]byte("something")) - return nil - }).GetExporterSetupSQL(context.Background(), 0) - assert.Assert(t, stderr != "") - - }) -} diff --git a/internal/pgmonitor/exporter.go b/internal/pgmonitor/exporter.go new file mode 100644 index 0000000000..9d7a1fc3c6 --- /dev/null +++ b/internal/pgmonitor/exporter.go @@ -0,0 +1,183 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgmonitor + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +const ( + ExporterPort = int32(9187) + + // TODO: With the current implementation of the crunchy-postgres-exporter + // it makes sense to hard-code the database. When moving away from the + // crunchy-postgres-exporter start.sh script we should re-evaluate always + // setting the exporter database to `postgres`. + ExporterDB = "postgres" + + // The exporter connects to all databases over loopback using a password. 
+ // Kubernetes guarantees localhost resolves to loopback: + // https://kubernetes.io/docs/concepts/cluster-administration/networking/ + // https://releases.k8s.io/v1.21.0/pkg/kubelet/kubelet_pods.go#L343 + ExporterHost = "localhost" +) + +// postgres_exporter command flags +var ( + ExporterWebConfigFileFlag = "--web.config.file=/web-config/web-config.yml" + ExporterDeactivateStatBGWriterFlag = "--no-collector.stat_bgwriter" +) + +// Defaults for certain values used in queries.yml +// TODO(dsessler7): make these values configurable via spec +var DefaultValuesForQueries = map[string]string{ + "PGBACKREST_INFO_THROTTLE_MINUTES": "10", + "PG_STAT_STATEMENTS_LIMIT": "20", + "PG_STAT_STATEMENTS_THROTTLE_MINUTES": "-1", +} + +// GenerateDefaultExporterQueries generates the default queries used by exporter +func GenerateDefaultExporterQueries(ctx context.Context, cluster *v1beta1.PostgresCluster) string { + log := logging.FromContext(ctx) + var queries string + baseQueries := []string{"backrest", "global", "global_dbsize", "per_db", "nodemx"} + queriesConfigDir := GetQueriesConfigDir(ctx) + + // TODO: When we add pgbouncer support we will do something like the following: + // if pgbouncerEnabled() { + // baseQueries = append(baseQueries, "pgbouncer") + // } + + for _, queryType := range baseQueries { + queriesContents, err := os.ReadFile(fmt.Sprintf("%s/queries_%s.yml", queriesConfigDir, queryType)) + if err != nil { + // log an error, but continue to next iteration + log.Error(err, fmt.Sprintf("Query file queries_%s.yml does not exist (it should)...", queryType)) + continue + } + queries += string(queriesContents) + "\n" + } + + // Add general queries for specific postgres version + queriesGeneral, err := os.ReadFile(fmt.Sprintf("%s/pg%d/queries_general.yml", queriesConfigDir, cluster.Spec.PostgresVersion)) + if err != nil { + // log an error, but continue + log.Error(err, fmt.Sprintf("Query file %s/pg%d/queries_general.yml does not exist (it should)...", queriesConfigDir, cluster.Spec.PostgresVersion)) + } else { + queries += string(queriesGeneral) + "\n" + } + + // Add pg_stat_statement queries for specific postgres version + queriesPgStatStatements, err := os.ReadFile(fmt.Sprintf("%s/pg%d/queries_pg_stat_statements.yml", queriesConfigDir, cluster.Spec.PostgresVersion)) + if err != nil { + // log an error, but continue + log.Error(err, fmt.Sprintf("Query file %s/pg%d/queries_pg_stat_statements.yml not loaded.", queriesConfigDir, cluster.Spec.PostgresVersion)) + } else { + queries += string(queriesPgStatStatements) + "\n" + } + + // If postgres version >= 12, add pg_stat_statements_reset queries + if cluster.Spec.PostgresVersion >= 12 { + queriesPgStatStatementsReset, err := os.ReadFile(fmt.Sprintf("%s/pg%d/queries_pg_stat_statements_reset_info.yml", queriesConfigDir, cluster.Spec.PostgresVersion)) + if err != nil { + // log an error, but continue + log.Error(err, fmt.Sprintf("Query file %s/pg%d/queries_pg_stat_statements_reset_info.yml not loaded.", queriesConfigDir, cluster.Spec.PostgresVersion)) + } else { + queries += string(queriesPgStatStatementsReset) + "\n" + } + } + + // Find and replace default values in queries + for k, v := range DefaultValuesForQueries { + queries = strings.ReplaceAll(queries, fmt.Sprintf("#%s#", k), v) + } + + // TODO: Add ability to exclude certain user-specified queries + + return queries +} + +// ExporterStartCommand generates an entrypoint that will create a master queries file and +// start the postgres_exporter. 
It will repeat those steps if it notices a change in +// the source queries files. +func ExporterStartCommand(builtinCollectors bool, commandFlags ...string) []string { + script := []string{ + // Older images do not have the command on the PATH. + `PATH="$PATH:$(echo /opt/cpm/bin/postgres_exporter-*)"`, + + // Set up temporary file to hold postgres_exporter process id + `POSTGRES_EXPORTER_PIDFILE=/tmp/postgres_exporter.pid`, + + `postgres_exporter_flags=(`, + `'--extend.query-path=/tmp/queries.yml'`, + fmt.Sprintf(`'--web.listen-address=:%d'`, ExporterPort), + `"$@")`, + } + + // Append flags that disable built-in collectors. Find flags in the help + // output and return them with "--[no-]" replaced by "--no-" or "--". + if !builtinCollectors { + script = append(script, + `postgres_exporter_flags+=($(`, + `postgres_exporter --help 2>&1 | while read -r w _; do case "${w}" in`, + `'--[no-]collector.'*) echo "--no-${w#*-]}";;`, + `'--[no-]disable'*'metrics') echo "--${w#*-]}";;`, + `esac; done))`, + ) + } + + script = append(script, + // declare function that will combine custom queries file and default + // queries and start the postgres_exporter + `start_postgres_exporter() {`, + ` cat /conf/* > /tmp/queries.yml`, + ` echo "Starting postgres_exporter with the following flags..."`, + ` echo "${postgres_exporter_flags[@]}"`, + ` postgres_exporter "${postgres_exporter_flags[@]}" &`, + ` echo $! > $POSTGRES_EXPORTER_PIDFILE`, + `}`, + + // run function to combine queries files and start postgres_exporter + `start_postgres_exporter`, + + // Create a file descriptor with a no-op process that will not get + // cleaned up + `exec {fd}<> <(:||:)`, + + // Set up loop. Use read's timeout setting instead of sleep, + // which uses up a lot of memory + `while read -r -t 3 -u "${fd}" ||:; do`, + + // If either directories' modify time is newer than our file descriptor's, + // something must have changed, so kill the postgres_exporter + ` if ([ "/conf" -nt "/proc/self/fd/${fd}" ] || [ "/opt/crunchy/password" -nt "/proc/self/fd/${fd}" ]) \`, + ` && kill $(head -1 ${POSTGRES_EXPORTER_PIDFILE?});`, + ` then`, + // When something changes we want to get rid of the old file descriptor, get a fresh one + // and restart the loop + ` echo "Something changed..."`, + ` exec {fd}>&- && exec {fd}<> <(:||:)`, + ` stat --format='Latest queries file dated %y' "/conf"`, + ` stat --format='Latest password file dated %y' "/opt/crunchy/password"`, + ` fi`, + + // If postgres_exporter is not running, restart it + // Use the recorded pid as a proxy for checking if postgres_exporter is running + ` if [[ ! -e /proc/$(head -1 ${POSTGRES_EXPORTER_PIDFILE?}) ]] ; then`, + ` start_postgres_exporter`, + ` fi`, + `done`, + ) + + return append([]string{ + "bash", "-ceu", "--", strings.Join(script, "\n"), "postgres_exporter_watcher", + }, commandFlags...) +} diff --git a/internal/pgmonitor/exporter_test.go b/internal/pgmonitor/exporter_test.go new file mode 100644 index 0000000000..5ba14e0993 --- /dev/null +++ b/internal/pgmonitor/exporter_test.go @@ -0,0 +1,90 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
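[Editor's note] As a usage sketch, the command slice returned by ExporterStartCommand is presumably consumed as an exporter container entrypoint; that wiring is not part of this diff, so the container name and image below are placeholders, and only the flags declared above are used.

// Assumed wiring, not shown in this diff: feed the generated entrypoint and
// flags into an exporter container. Name and image are placeholders.
container := corev1.Container{
	Name:  "exporter",
	Image: "example-postgres-exporter-image",
	// false: also emit the flags that disable the exporter's built-in collectors.
	Command: pgmonitor.ExporterStartCommand(false,
		pgmonitor.ExporterWebConfigFileFlag,
		pgmonitor.ExporterDeactivateStatBGWriterFlag),
}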
+// +// SPDX-License-Identifier: Apache-2.0 + +package pgmonitor + +import ( + "context" + "os" + "strings" + "testing" + + "gotest.tools/v3/assert" + "sigs.k8s.io/yaml" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestGenerateDefaultExporterQueries(t *testing.T) { + if os.Getenv("QUERIES_CONFIG_DIR") == "" { + t.Skip("QUERIES_CONFIG_DIR must be set") + } + + ctx := context.Background() + cluster := &v1beta1.PostgresCluster{} + + t.Run("PG<=11", func(t *testing.T) { + cluster.Spec.PostgresVersion = 11 + queries := GenerateDefaultExporterQueries(ctx, cluster) + assert.Assert(t, !strings.Contains(queries, "ccp_pg_stat_statements_reset"), + "Queries contain 'ccp_pg_stat_statements_reset' query when they should not.") + }) + + t.Run("PG>=12", func(t *testing.T) { + cluster.Spec.PostgresVersion = 12 + queries := GenerateDefaultExporterQueries(ctx, cluster) + assert.Assert(t, strings.Contains(queries, "ccp_pg_stat_statements_reset"), + "Queries do not contain 'ccp_pg_stat_statements_reset' query when they should.") + }) +} + +func TestExporterStartCommand(t *testing.T) { + for _, tt := range []struct { + Name string + Collectors bool + Flags []string + Expect func(t *testing.T, command []string, script string) + }{ + { + Name: "NoCollectorsNoFlags", + Expect: func(t *testing.T, _ []string, script string) { + assert.Assert(t, cmp.Contains(script, "--[no-]collector")) + }, + }, + { + Name: "WithCollectorsNoFlags", + Collectors: true, + Expect: func(t *testing.T, _ []string, script string) { + assert.Assert(t, !strings.Contains(script, "collector")) + }, + }, + { + Name: "MultipleFlags", + Flags: []string{"--firstTestFlag", "--secondTestFlag"}, + Expect: func(t *testing.T, command []string, _ string) { + assert.DeepEqual(t, command[4:], []string{"postgres_exporter_watcher", "--firstTestFlag", "--secondTestFlag"}) + }, + }, + } { + t.Run(tt.Name, func(t *testing.T) { + command := ExporterStartCommand(tt.Collectors, tt.Flags...) + assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) + assert.Assert(t, len(command) > 3) + script := command[3] + + assert.Assert(t, cmp.Contains(script, "'--extend.query-path=/tmp/queries.yml'")) + assert.Assert(t, cmp.Contains(script, "'--web.listen-address=:9187'")) + + tt.Expect(t, command, script) + + t.Run("PrettyYAML", func(t *testing.T) { + b, err := yaml.Marshal(script) + assert.NilError(t, err) + assert.Assert(t, strings.HasPrefix(string(b), `|`), + "expected literal block scalar, got:\n%s", b) + }) + }) + } +} diff --git a/internal/pgmonitor/postgres.go b/internal/pgmonitor/postgres.go index a76c6e7f26..8aed164a18 100644 --- a/internal/pgmonitor/postgres.go +++ b/internal/pgmonitor/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor @@ -19,39 +8,27 @@ import ( "context" "strings" + corev1 "k8s.io/api/core/v1" + "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" - corev1 "k8s.io/api/core/v1" ) const ( // MonitoringUser is a Postgres user created by pgMonitor configuration MonitoringUser = "ccp_monitoring" - - // TODO jmckulk: copied from pgbouncer; candidate for common package? - // sqlCurrentAndFutureDatabases returns all the database names where pgMonitor - // functions should be enabled or disabled. It includes the "template1" - // database so that exporter is automatically enabled in future databases. - // The "template0" database is explicitly excluded to ensure it is never - // manipulated. - // - https://www.postgresql.org/docs/current/managing-databases.html - sqlCurrentAndFutureDatabases = "" + - `SELECT datname FROM pg_catalog.pg_database` + - ` WHERE datallowconn AND datname NOT IN ('template0')` ) // PostgreSQLHBAs provides the Postgres HBA rules for allowing the monitoring // exporter to be accessible func PostgreSQLHBAs(inCluster *v1beta1.PostgresCluster, outHBAs *postgres.HBAs) { if ExporterEnabled(inCluster) { - // Kubernetes does guarantee localhost resolves to loopback: - // https://kubernetes.io/docs/concepts/cluster-administration/networking/ - // https://releases.k8s.io/v1.21.0/pkg/kubelet/kubelet_pods.go#L343 - outHBAs.Mandatory = append(outHBAs.Mandatory, *postgres.NewHBA().TCP(). - User(MonitoringUser).Network("127.0.0.0/8").Method("md5")) - outHBAs.Mandatory = append(outHBAs.Mandatory, *postgres.NewHBA().TCP(). - User(MonitoringUser).Network("::1/128").Method("md5")) + // Limit the monitoring user to local connections using SCRAM. 
+ outHBAs.Mandatory = append(outHBAs.Mandatory, + *postgres.NewHBA().TCP().User(MonitoringUser).Method("scram-sha-256").Network("127.0.0.0/8"), + *postgres.NewHBA().TCP().User(MonitoringUser).Method("scram-sha-256").Network("::1/128"), + *postgres.NewHBA().TCP().User(MonitoringUser).Method("reject")) } } @@ -62,14 +39,9 @@ func PostgreSQLParameters(inCluster *v1beta1.PostgresCluster, outParameters *pos // Exporter expects that shared_preload_libraries are installed // pg_stat_statements: https://access.crunchydata.com/documentation/pgmonitor/latest/exporter/ // pgnodemx: https://github.com/CrunchyData/pgnodemx - libraries := []string{"pg_stat_statements", "pgnodemx"} - - defined, found := outParameters.Mandatory.Get("shared_preload_libraries") - if found { - libraries = append(libraries, defined) - } - - outParameters.Mandatory.Add("shared_preload_libraries", strings.Join(libraries, ",")) + outParameters.Mandatory.AppendToList("shared_preload_libraries", "pg_stat_statements", "pgnodemx") + outParameters.Mandatory.Add("pgnodemx.kdapi_path", + postgres.DownwardAPIVolumeMount().MountPath) } } @@ -80,9 +52,7 @@ func PostgreSQLParameters(inCluster *v1beta1.PostgresCluster, outParameters *pos func DisableExporterInPostgreSQL(ctx context.Context, exec postgres.Executor) error { log := logging.FromContext(ctx) - stdout, stderr, err := postgres.Executor(exec).ExecInDatabasesFromQuery(ctx, - `SELECT pg_catalog.current_database();`, - strings.TrimSpace(` + stdout, stderr, err := exec.Exec(ctx, strings.NewReader(` SELECT pg_catalog.format('ALTER ROLE %I NOLOGIN', :'username') WHERE EXISTS (SELECT 1 FROM pg_catalog.pg_roles WHERE rolname = :'username') \gexec`), @@ -103,23 +73,36 @@ func EnableExporterInPostgreSQL(ctx context.Context, exec postgres.Executor, monitoringSecret *corev1.Secret, database, setup string) error { log := logging.FromContext(ctx) - stdout, stderr, err := postgres.Executor(exec).ExecInDatabasesFromQuery(ctx, - sqlCurrentAndFutureDatabases, + stdout, stderr, err := exec.ExecInAllDatabases(ctx, strings.Join([]string{ + // Quiet NOTICE messages from IF EXISTS statements. + // - https://www.postgresql.org/docs/current/runtime-config-client.html + `SET client_min_messages = WARNING;`, + // Exporter expects that extension(s) to be installed in all databases // pg_stat_statements: https://access.crunchydata.com/documentation/pgmonitor/latest/exporter/ "CREATE EXTENSION IF NOT EXISTS pg_stat_statements;", + + // Run idempotent update + "ALTER EXTENSION pg_stat_statements UPDATE;", }, "\n"), - nil, + map[string]string{ + "ON_ERROR_STOP": "on", // Abort when any one statement fails. + "QUIET": "on", // Do not print successful commands to stdout. + }, ) log.V(1).Info("applied pgMonitor objects", "database", "current and future databases", "stdout", stdout, "stderr", stderr) // NOTE: Setup is run last to ensure that the setup sql is used in the hash if err == nil { - stdout, stderr, err = postgres.Executor(exec).ExecInDatabasesFromQuery(ctx, - database, + stdout, stderr, err = exec.ExecInDatabasesFromQuery(ctx, + `SELECT :'database'`, strings.Join([]string{ + // Quiet NOTICE messages from IF EXISTS statements. + // - https://www.postgresql.org/docs/current/runtime-config-client.html + `SET client_min_messages = WARNING;`, + // Setup.sql file from the exporter image. 
sql is specific // to the PostgreSQL version setup, @@ -127,17 +110,24 @@ func EnableExporterInPostgreSQL(ctx context.Context, exec postgres.Executor, // pgnodemx: https://github.com/CrunchyData/pgnodemx // The `monitor` schema is hard-coded in the setup SQL files // from pgMonitor configuration - // https://github.com/CrunchyData/pgmonitor/blob/master/exporter/postgres/queries_nodemx.yml + // https://github.com/CrunchyData/pgmonitor/blob/master/postgres_exporter/common/queries_nodemx.yml "CREATE EXTENSION IF NOT EXISTS pgnodemx WITH SCHEMA monitor;", + // Run idempotent update + "ALTER EXTENSION pgnodemx UPDATE;", + // ccp_monitoring user is created in Setup.sql without a // password; update the password and ensure that the ROLE // can login to the database `ALTER ROLE :"username" LOGIN PASSWORD :'verifier';`, }, "\n"), map[string]string{ + "database": database, "username": MonitoringUser, "verifier": string(monitoringSecret.Data["verifier"]), + + "ON_ERROR_STOP": "on", // Abort when any one statement fails. + "QUIET": "on", // Do not print successful commands to stdout. }, ) diff --git a/internal/pgmonitor/postgres_test.go b/internal/pgmonitor/postgres_test.go index 3ccae9368f..655fa936ae 100644 --- a/internal/pgmonitor/postgres_test.go +++ b/internal/pgmonitor/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor @@ -19,9 +8,10 @@ import ( "strings" "testing" + "gotest.tools/v3/assert" + "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" - "gotest.tools/v3/assert" ) func TestPostgreSQLHBA(t *testing.T) { @@ -45,8 +35,10 @@ func TestPostgreSQLHBA(t *testing.T) { outHBAs := postgres.HBAs{} PostgreSQLHBAs(inCluster, &outHBAs) - assert.Equal(t, outHBAs.Mandatory[0].String(), `host all "ccp_monitoring" "127.0.0.0/8" md5`) - assert.Equal(t, outHBAs.Mandatory[1].String(), `host all "ccp_monitoring" "::1/128" md5`) + assert.Equal(t, len(outHBAs.Mandatory), 3) + assert.Equal(t, outHBAs.Mandatory[0].String(), `host all "ccp_monitoring" "127.0.0.0/8" scram-sha-256`) + assert.Equal(t, outHBAs.Mandatory[1].String(), `host all "ccp_monitoring" "::1/128" scram-sha-256`) + assert.Equal(t, outHBAs.Mandatory[2].String(), `host all "ccp_monitoring" all reject`) }) } diff --git a/internal/pgmonitor/util.go b/internal/pgmonitor/util.go index 3fdf6bc2b3..f5606ccd08 100644 --- a/internal/pgmonitor/util.go +++ b/internal/pgmonitor/util.go @@ -1,24 +1,30 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor import ( + "context" + "os" + + "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +func GetQueriesConfigDir(ctx context.Context) string { + log := logging.FromContext(ctx) + // The QUERIES_CONFIG_DIR environment variable can be used to tell postgres-operator where to + // find the setup.sql and queries.yml files when running the postgres-operator binary locally + if queriesConfigDir := os.Getenv("QUERIES_CONFIG_DIR"); queriesConfigDir != "" { + log.Info("Directory for setup.sql and queries files set by QUERIES_CONFIG_DIR env var. " + + "This should only be used when running the postgres-operator binary locally.") + return queriesConfigDir + } + + return "/opt/crunchy/conf" +} + // ExporterEnabled returns true if the monitoring exporter is enabled func ExporterEnabled(cluster *v1beta1.PostgresCluster) bool { if cluster.Spec.Monitoring == nil { diff --git a/internal/pgmonitor/util_test.go b/internal/pgmonitor/util_test.go index 5f5bca6ddf..8d16d74bae 100644 --- a/internal/pgmonitor/util_test.go +++ b/internal/pgmonitor/util_test.go @@ -1,25 +1,15 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor import ( "testing" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" "gotest.tools/v3/assert" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestExporterEnabled(t *testing.T) { diff --git a/internal/pki/common.go b/internal/pki/common.go index 57a8e6f6ab..fbe9421f8b 100644 --- a/internal/pki/common.go +++ b/internal/pki/common.go @@ -1,54 +1,95 @@ -package pki - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +package pki import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/x509" + "crypto/x509/pkix" "math/big" "time" ) -const ( - // beforeInterval sets a starting time for the issuance of a certificate, - // which is defaulted to an hour earlier - beforeInterval = -1 * time.Hour +// certificateSignatureAlgorithm is ECDSA with SHA-384, the recommended +// signature algorithm with the P-256 curve. +const certificateSignatureAlgorithm = x509.ECDSAWithSHA384 - // certificateSignatureAlgorithm sets the default signature algorithm to use - // for our certificates, which is the ECDSA with SHA-384. This is the - // recommended signature algorithm with the P-256 curve. - certificateSignatureAlgorithm = x509.ECDSAWithSHA384 +// currentTime returns the current local time. It is a variable so it can be +// replaced during testing. +var currentTime = time.Now - // serialNumberBits is the number of bits to allow in the generation of a - // random serial number - serialNumberBits = 128 -) - -// generateKey generates a ECDSA keypair using a P-256 curve. This curve is -// roughly equivalent to a RSA 3072 bit key, but requires less bits to achieve +// generateKey returns a random ECDSA key using a P-256 curve. This curve is +// roughly equivalent to an RSA 3072-bit key but requires less bits to achieve // the equivalent cryptographic strength. Additionally, ECDSA is FIPS 140-2 // compliant. func generateKey() (*ecdsa.PrivateKey, error) { return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) } -// generateSerialNumber generates a random serial number that can be used for -// uniquely identifying a certificate +// generateSerialNumber returns a random 128-bit integer. 
func generateSerialNumber() (*big.Int, error) { - return rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), serialNumberBits)) + return rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128)) +} + +func generateLeafCertificate( + signer *x509.Certificate, signerPrivate *ecdsa.PrivateKey, + signeePublic *ecdsa.PublicKey, serialNumber *big.Int, + commonName string, dnsNames []string, +) (*x509.Certificate, error) { + const leafExpiration = time.Hour * 24 * 365 + const leafStartValid = time.Hour * -1 + + now := currentTime() + template := &x509.Certificate{ + BasicConstraintsValid: true, + DNSNames: dnsNames, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + NotBefore: now.Add(leafStartValid), + NotAfter: now.Add(leafExpiration), + SerialNumber: serialNumber, + SignatureAlgorithm: certificateSignatureAlgorithm, + Subject: pkix.Name{ + CommonName: commonName, + }, + } + + bytes, err := x509.CreateCertificate(rand.Reader, template, signer, + signeePublic, signerPrivate) + + parsed, _ := x509.ParseCertificate(bytes) + return parsed, err +} + +func generateRootCertificate( + privateKey *ecdsa.PrivateKey, serialNumber *big.Int, +) (*x509.Certificate, error) { + const rootCommonName = "postgres-operator-ca" + const rootExpiration = time.Hour * 24 * 365 * 10 + const rootStartValid = time.Hour * -1 + + now := currentTime() + template := &x509.Certificate{ + BasicConstraintsValid: true, + IsCA: true, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + MaxPathLenZero: true, // there are no intermediate certificates + NotBefore: now.Add(rootStartValid), + NotAfter: now.Add(rootExpiration), + SerialNumber: serialNumber, + SignatureAlgorithm: certificateSignatureAlgorithm, + Subject: pkix.Name{ + CommonName: rootCommonName, + }, + } + + // A root certificate is self-signed, so pass in the template twice. + bytes, err := x509.CreateCertificate(rand.Reader, template, template, + privateKey.Public(), privateKey) + + parsed, _ := x509.ParseCertificate(bytes) + return parsed, err } diff --git a/internal/pki/doc.go b/internal/pki/doc.go index 866926bfc9..71f8c0a1bc 100644 --- a/internal/pki/doc.go +++ b/internal/pki/doc.go @@ -1,26 +1,13 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package pki provides types and functions to support the public key -// infrastructure of the Postgres Operator. It enforces a three layer system +// infrastructure of the Postgres Operator. It enforces a two layer system // of certificate authorities and certificates. // -// NewRootCertificateAuthority().Generate() creates a new root CA. -// ParseRootCertificateAuthority() loads an existing root certificate and key. -// -// NewLeafCertificate().Generate() creates a new leaf certificate. +// NewRootCertificateAuthority() creates a new root CA. +// GenerateLeafCertificate() creates a new leaf certificate. 
// // Certificate and PrivateKey are primitives that can be marshaled. package pki diff --git a/internal/pki/encoding.go b/internal/pki/encoding.go index c5ef890410..2d2cd851e3 100644 --- a/internal/pki/encoding.go +++ b/internal/pki/encoding.go @@ -1,177 +1,95 @@ -package pki - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package pki import ( "crypto/ecdsa" "crypto/x509" + "encoding" "encoding/pem" "fmt" ) const ( - // pemCertificateType is part of the PEM header that identifies it as a x509 - // certificate - pemCertificateType = "CERTIFICATE" + // pemLabelCertificate is the textual encoding label for an X.509 certificate + // according to RFC 7468. See https://tools.ietf.org/html/rfc7468. + pemLabelCertificate = "CERTIFICATE" - // pemPrivateKeyType is part of the PEM header that identifies the private - // key. This is presently hard coded to ECDSA keys - pemPrivateKeyType = "EC PRIVATE KEY" + // pemLabelECDSAKey is the textual encoding label for an elliptic curve private key + // according to RFC 5915. See https://tools.ietf.org/html/rfc5915. + pemLabelECDSAKey = "EC PRIVATE KEY" ) -// Certificate is a higher-level structure that encapsulates the x509 machinery -// around a certificate. -type Certificate struct { - // Certificate is the byte encoded value for the certificate - Certificate []byte -} +var ( + _ encoding.TextMarshaler = Certificate{} + _ encoding.TextMarshaler = (*Certificate)(nil) + _ encoding.TextUnmarshaler = (*Certificate)(nil) +) -// MarshalText encodes a x509 certificate into PEM format -func (c *Certificate) MarshalText() ([]byte, error) { - block := &pem.Block{ - Type: pemCertificateType, - Bytes: c.Certificate, +// MarshalText returns a PEM encoding of c that OpenSSL understands. +func (c Certificate) MarshalText() ([]byte, error) { + if c.x509 == nil || len(c.x509.Raw) == 0 { + _, err := x509.ParseCertificate(nil) + return nil, err } - return pem.EncodeToMemory(block), nil + return pem.EncodeToMemory(&pem.Block{ + Type: pemLabelCertificate, + Bytes: c.x509.Raw, + }), nil } -// UnmarshalText decodes a x509 certificate from PEM format +// UnmarshalText populates c from its PEM encoding. func (c *Certificate) UnmarshalText(data []byte) error { block, _ := pem.Decode(data) - // if block is nil, that means it is invalid PEM - if block == nil { - return fmt.Errorf("%w: malformed data", ErrInvalidPEM) - } - - // if the type of the PEM block is not a certificate, return an error - if block.Type != pemCertificateType { - return fmt.Errorf("%w: not type %s", ErrInvalidPEM, pemCertificateType) - } - - // everything checks out, at least in terms of PEM. Place encoded bytes in - // object - c.Certificate = block.Bytes - - return nil -} - -// PrivateKey encapsulates functionality around marshalling a ECDSA private key. 
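The `var _ encoding.TextMarshaler = Certificate{}` block added in encoding.go is the usual compile-time interface assertion: assigning to the blank identifier costs nothing at runtime, but the build breaks if the type ever stops satisfying the interface. A minimal standalone sketch of the same idiom, using a made-up Celsius type rather than anything from this package:

package main

import (
    "encoding"
    "fmt"
)

// Celsius exists only to demonstrate the assertion idiom.
type Celsius float64

// MarshalText renders the temperature as text, satisfying encoding.TextMarshaler.
func (c Celsius) MarshalText() ([]byte, error) {
    return []byte(fmt.Sprintf("%g C", float64(c))), nil
}

// Compile-time check: removing or renaming MarshalText above makes this line fail to build.
var _ encoding.TextMarshaler = Celsius(0)

func main() {
    b, _ := Celsius(21.5).MarshalText()
    fmt.Println(string(b)) // 21.5 C
}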
-type PrivateKey struct { - // PrivateKey is the private key - PrivateKey *ecdsa.PrivateKey - - // marshalECPrivateKey turns a ECDSA private key into DER format, which is an - // intermediate form prior to turning it into a PEM block - marshalECPrivateKey func(*ecdsa.PrivateKey) ([]byte, error) -} - -// MarshalText encodes the private key in PEM format -func (c *PrivateKey) MarshalText() ([]byte, error) { - if c.marshalECPrivateKey == nil { - return []byte{}, fmt.Errorf("%w: marshalECPrivateKey", ErrFunctionNotImplemented) + if block == nil || block.Type != pemLabelCertificate { + return fmt.Errorf("not a PEM-encoded certificate") } - // first, convert private key to DER format - der, err := c.marshalECPrivateKey(c.PrivateKey) - - if err != nil { - return []byte{}, err + parsed, err := x509.ParseCertificate(block.Bytes) + if err == nil { + c.x509 = parsed } - - // encode the private key. in the future, once PKCS #8 encryption is supported - // in go, we can encrypt the private key - return c.marshalPrivateKey(der), nil + return err } -// UnmarshalText decodes a private key from PEM format -func (c *PrivateKey) UnmarshalText(data []byte) error { - block, _ := pem.Decode(data) - - // if block is nil, that means it is invalid PEM - if block == nil { - return fmt.Errorf("%w: malformed data", ErrInvalidPEM) - } +var ( + _ encoding.TextMarshaler = PrivateKey{} + _ encoding.TextMarshaler = (*PrivateKey)(nil) + _ encoding.TextUnmarshaler = (*PrivateKey)(nil) +) - // if the type of the PEM block is not private key, return an error - if block.Type != pemPrivateKeyType { - return fmt.Errorf("%w: not type %s", ErrInvalidPEM, pemPrivateKeyType) +// MarshalText returns a PEM encoding of k that OpenSSL understands. +func (k PrivateKey) MarshalText() ([]byte, error) { + if k.ecdsa == nil { + k.ecdsa = new(ecdsa.PrivateKey) } - // store the DER; in the future, this is where we would decrypt the DER once - // PKCS #8 encryption is supported in Go - der := block.Bytes - - // determine if the data actually represents a ECDSA private key - privateKey, err := x509.ParseECPrivateKey(der) - + der, err := x509.MarshalECPrivateKey(k.ecdsa) if err != nil { - return fmt.Errorf("%w: not a valid ECDSA private key", ErrInvalidPEM) + return nil, err } - // everything checks out, we have a ECDSA private key - c.PrivateKey = privateKey - - return nil -} - -// marshalPrivateKey encodes a private key in PEM format -func (c *PrivateKey) marshalPrivateKey(der []byte) []byte { - block := &pem.Block{ - Type: pemPrivateKeyType, + return pem.EncodeToMemory(&pem.Block{ + Type: pemLabelECDSAKey, Bytes: der, - } - - return pem.EncodeToMemory(block) -} - -// NewPrivateKey performs the setup for creating a new private key, including -// any functions that need to be created -func NewPrivateKey(key *ecdsa.PrivateKey) *PrivateKey { - return &PrivateKey{ - PrivateKey: key, - marshalECPrivateKey: marshalECPrivateKey, - } + }), nil } -// ParseCertificate accepts binary encoded data to parse a certificate -func ParseCertificate(data []byte) (*Certificate, error) { - certificate := &Certificate{} +// UnmarshalText populates k from its PEM encoding. 
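The new MarshalText and UnmarshalText methods lean entirely on the standard library: SEC 1 DER via x509.MarshalECPrivateKey and x509.ParseECPrivateKey, wrapped in a PEM block labeled EC PRIVATE KEY. The following self-contained sketch, not taken from the diff, shows that round trip with nothing but crypto/x509 and encoding/pem:

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/x509"
    "encoding/pem"
    "fmt"
)

func main() {
    // Generate a P-256 key, the same curve the pki package uses.
    key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        panic(err)
    }

    // SEC 1 / RFC 5915 DER, wrapped in a PEM block labeled "EC PRIVATE KEY".
    der, err := x509.MarshalECPrivateKey(key)
    if err != nil {
        panic(err)
    }
    block := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})

    // Decoding reverses both steps.
    decoded, _ := pem.Decode(block)
    parsed, err := x509.ParseECPrivateKey(decoded.Bytes)
    if err != nil {
        panic(err)
    }
    fmt.Println("round trip ok:", key.Equal(parsed))
}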
+func (k *PrivateKey) UnmarshalText(data []byte) error { + block, _ := pem.Decode(data) - if err := certificate.UnmarshalText(data); err != nil { - return nil, err + if block == nil || block.Type != pemLabelECDSAKey { + return fmt.Errorf("not a PEM-encoded private key") } - return certificate, nil -} - -// ParsePrivateKey accepts binary encoded data attempts to parse a private key -func ParsePrivateKey(data []byte) (*PrivateKey, error) { - privateKey := NewPrivateKey(nil) - - if err := privateKey.UnmarshalText(data); err != nil { - return nil, err + key, err := x509.ParseECPrivateKey(block.Bytes) + if err == nil { + k.ecdsa = key } - - return privateKey, nil -} - -// marshalECPrivateKey is a wrapper function around the -// "x509.MarshalECPrivateKey" function that converts a private key -func marshalECPrivateKey(privateKey *ecdsa.PrivateKey) ([]byte, error) { - return x509.MarshalECPrivateKey(privateKey) + return err } diff --git a/internal/pki/encoding_test.go b/internal/pki/encoding_test.go index a75783c5a2..cdf7c0de5a 100644 --- a/internal/pki/encoding_test.go +++ b/internal/pki/encoding_test.go @@ -1,354 +1,183 @@ -package pki - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package pki import ( "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "errors" - "math/big" - "reflect" + "os" + "os/exec" + "path/filepath" + "strings" "testing" - "time" -) -// assertConstructed ensures that private key functions are set. -func assertConstructed(t testing.TB, key *PrivateKey) { - t.Helper() + "gotest.tools/v3/assert" - if key.marshalECPrivateKey == nil { - t.Fatalf("expected marshalECPrivateKey to be set on private key") - } -} + "github.com/crunchydata/postgres-operator/internal/testing/require" +) + +func TestCertificateTextMarshaling(t *testing.T) { + t.Run("Zero", func(t *testing.T) { + // Zero cannot marshal. + _, err := Certificate{}.MarshalText() + assert.ErrorContains(t, err, "malformed") -func TestCertificate(t *testing.T) { - // generateCertificate is a helper function that generates a random private key - // and ignore any errors. 
creates a self-signed certificate as we don't need - // much - generateCertificate := func() *Certificate { - key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - now := time.Now() - template := &x509.Certificate{ - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, - NotBefore: now, - NotAfter: now.Add(12 * time.Hour), - SerialNumber: big.NewInt(1234), - SignatureAlgorithm: certificateSignatureAlgorithm, - Subject: pkix.Name{ - CommonName: "*", - }, - } - - certificate, _ := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) - - return &Certificate{Certificate: certificate} - } - - t.Run("MarshalText", func(t *testing.T) { - certificate := generateCertificate() - - encoded, err := certificate.MarshalText() - - if err != nil { - t.Fatalf("something went horribly wrong") - } - - // test that it matches the value of certificate - block, _ := pem.Decode(encoded) - - // ensure it's the valid pem type - if block.Type != pemCertificateType { - t.Fatalf("expected pem type %q actual %q", block.Type, pemCertificateType) - } - - // ensure the certificates match - if !bytes.Equal(certificate.Certificate, block.Bytes) { - t.Fatalf("pem encoded certificate does not match certificate") - } + // Empty cannot unmarshal. + var sink Certificate + assert.ErrorContains(t, sink.UnmarshalText(nil), "PEM-encoded") + assert.ErrorContains(t, sink.UnmarshalText([]byte{}), "PEM-encoded") }) - t.Run("UnmarshalText", func(t *testing.T) { - expected := generateCertificate() + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) - t.Run("valid", func(t *testing.T) { - // manually marshal the certificate - encoded := pem.EncodeToMemory(&pem.Block{Bytes: expected.Certificate, Type: pemCertificateType}) - c := &Certificate{} + cert := root.Certificate + txt, err := cert.MarshalText() + assert.NilError(t, err) + assert.Assert(t, bytes.HasPrefix(txt, []byte("-----BEGIN CERTIFICATE-----\n")), "got %q", txt) + assert.Assert(t, bytes.HasSuffix(txt, []byte("\n-----END CERTIFICATE-----\n")), "got %q", txt) - if err := c.UnmarshalText(encoded); err != nil { - t.Fatalf("expected no error, got %s", err.Error()) - } + t.Run("RoundTrip", func(t *testing.T) { + var sink Certificate + assert.NilError(t, sink.UnmarshalText(txt)) + assert.DeepEqual(t, cert, sink) + }) - if !reflect.DeepEqual(expected.Certificate, c.Certificate) { - t.Fatalf("expected encoded certificate to be unmarshaled in identical format") - } - }) + t.Run("Bundle", func(t *testing.T) { + other, _ := NewRootCertificateAuthority() + otherText, err := other.Certificate.MarshalText() + assert.NilError(t, err) - t.Run("invalid", func(t *testing.T) { - t.Run("not pem", func(t *testing.T) { - c := &Certificate{} + bundle := bytes.Join([][]byte{txt, otherText}, nil) - if err := c.UnmarshalText([]byte("this is very invalid")); !errors.Is(err, ErrInvalidPEM) { - t.Fatalf("expected invalid PEM error") - } - }) + // Only the first certificate of a bundle is parsed. 
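The Bundle subtests rely on a property of encoding/pem that the comment above calls out: pem.Decode returns the first block plus the unread remainder, so UnmarshalText only ever sees the first certificate or key in a bundle. A short sketch of how a caller would walk an entire bundle, using a hypothetical countPEMBlocks helper:

package main

import (
    "encoding/pem"
    "fmt"
)

// countPEMBlocks loops on the remainder that pem.Decode returns, which is
// what reading every block of a bundle requires.
func countPEMBlocks(bundle []byte) int {
    count := 0
    for {
        block, rest := pem.Decode(bundle)
        if block == nil {
            return count
        }
        count++
        bundle = rest
    }
}

func main() {
    bundle := []byte(
        "-----BEGIN CERTIFICATE-----\nMA==\n-----END CERTIFICATE-----\n" +
            "-----BEGIN CERTIFICATE-----\nMA==\n-----END CERTIFICATE-----\n")
    fmt.Println(countPEMBlocks(bundle)) // 2
}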
+ var sink Certificate + assert.NilError(t, sink.UnmarshalText(bundle)) + assert.DeepEqual(t, cert, sink) + }) - t.Run("not a certificate", func(t *testing.T) { - encoded := pem.EncodeToMemory(&pem.Block{Bytes: expected.Certificate, Type: "CEREAL"}) - c := &Certificate{} + t.Run("EncodedEmpty", func(t *testing.T) { + txt := []byte("-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----\n") - if err := c.UnmarshalText(encoded); !errors.Is(err, ErrInvalidPEM) { - t.Fatalf("expected invalid PEM error") - } - }) - }) + var sink Certificate + assert.ErrorContains(t, sink.UnmarshalText(txt), "malformed") }) -} -func TestNewPrivateKey(t *testing.T) { - key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - privateKey := NewPrivateKey(key) + t.Run("EncodedGarbage", func(t *testing.T) { + txt := []byte("-----BEGIN CERTIFICATE-----\nasdfasdf\n-----END CERTIFICATE-----\n") - if reflect.TypeOf(privateKey).String() != "*pki.PrivateKey" { - t.Fatalf("expected *pki.PrivateKey in return") - } -} - -func TestParseCertificate(t *testing.T) { - // generateCertificate is a helper function that generates a random private key - // and ignore any errors. creates a self-signed certificate as we don't need - // much - generateCertificate := func() *Certificate { - key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - now := time.Now() - template := &x509.Certificate{ - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, - NotBefore: now, - NotAfter: now.Add(12 * time.Hour), - SerialNumber: big.NewInt(1234), - SignatureAlgorithm: certificateSignatureAlgorithm, - Subject: pkix.Name{ - CommonName: "*", - }, - } - - certificate, _ := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) - - return &Certificate{Certificate: certificate} - } - - t.Run("valid", func(t *testing.T) { - expected := generateCertificate() - encoded := pem.EncodeToMemory(&pem.Block{Bytes: expected.Certificate, Type: pemCertificateType}) - - certificate, err := ParseCertificate(encoded) - - if err != nil { - t.Fatalf("expected no error, actual %s", err.Error()) - } - - if !reflect.DeepEqual(expected.Certificate, certificate.Certificate) { - t.Fatalf("expected parsed certificate to match expected") - } + var sink Certificate + assert.ErrorContains(t, sink.UnmarshalText(txt), "malformed") }) - t.Run("invalid", func(t *testing.T) { - data := []byte("bad") + t.Run("ReadByOpenSSL", func(t *testing.T) { + openssl := require.OpenSSL(t) + dir := t.TempDir() - certificate, err := ParseCertificate(data) + certFile := filepath.Join(dir, "cert.pem") + certBytes, err := cert.MarshalText() + assert.NilError(t, err) + assert.NilError(t, os.WriteFile(certFile, certBytes, 0o600)) - if err == nil { - t.Fatalf("expected error") - } + // The "openssl x509" command parses X.509 certificates. + cmd := exec.Command(openssl, "x509", + "-in", certFile, "-inform", "PEM", "-noout", "-text") - if certificate != nil { - t.Fatalf("expected certificate to be nil") - } + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) }) } -func TestParsePrivateKey(t *testing.T) { - // generatePrivateKey is a helper function that generates a random private key - // and ignore any errors. 
- generatePrivateKey := func() *PrivateKey { - key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - privateKey := &PrivateKey{PrivateKey: key} - privateKey.marshalECPrivateKey = marshalECPrivateKey - return privateKey - } +func TestPrivateKeyTextMarshaling(t *testing.T) { + t.Run("Zero", func(t *testing.T) { + // Zero cannot marshal. + _, err := PrivateKey{}.MarshalText() + assert.ErrorContains(t, err, "unknown") - t.Run("valid", func(t *testing.T) { - expected := generatePrivateKey() + // Empty cannot unmarshal. + var sink PrivateKey + assert.ErrorContains(t, sink.UnmarshalText(nil), "PEM-encoded") + assert.ErrorContains(t, sink.UnmarshalText([]byte{}), "PEM-encoded") + }) - t.Run("plaintext", func(t *testing.T) { - b, _ := x509.MarshalECPrivateKey(expected.PrivateKey) - encoded := pem.EncodeToMemory(&pem.Block{Bytes: b, Type: pemPrivateKeyType}) + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) - privateKey, err := ParsePrivateKey(encoded) + key := root.PrivateKey + txt, err := key.MarshalText() + assert.NilError(t, err) + assert.Assert(t, bytes.HasPrefix(txt, []byte("-----BEGIN EC PRIVATE KEY-----\n")), "got %q", txt) + assert.Assert(t, bytes.HasSuffix(txt, []byte("\n-----END EC PRIVATE KEY-----\n")), "got %q", txt) - if err != nil { - t.Fatalf("expected no error, actual %s", err.Error()) - } + t.Run("RoundTrip", func(t *testing.T) { + var sink PrivateKey + assert.NilError(t, sink.UnmarshalText(txt)) + assert.DeepEqual(t, key, sink) + }) - if !reflect.DeepEqual(expected.PrivateKey, privateKey.PrivateKey) { - t.Fatalf("expected parsed key to match expected") - } + t.Run("Bundle", func(t *testing.T) { + other, _ := NewRootCertificateAuthority() + otherText, err := other.PrivateKey.MarshalText() + assert.NilError(t, err) - // ensure private key functions are set - assertConstructed(t, privateKey) - }) + bundle := bytes.Join([][]byte{txt, otherText}, nil) + + // Only the first key of a bundle is parsed. + var sink PrivateKey + assert.NilError(t, sink.UnmarshalText(bundle)) + assert.DeepEqual(t, key, sink) }) - t.Run("invalid", func(t *testing.T) { - t.Run("plaintext", func(t *testing.T) { - data := []byte("bad") + t.Run("EncodedEmpty", func(t *testing.T) { + txt := []byte("-----BEGIN EC PRIVATE KEY-----\n\n-----END EC PRIVATE KEY-----\n") - privateKey, err := ParsePrivateKey(data) + var sink PrivateKey + assert.ErrorContains(t, sink.UnmarshalText(txt), "asn1") + }) - if err == nil { - t.Fatalf("expected error") - } + t.Run("EncodedGarbage", func(t *testing.T) { + txt := []byte("-----BEGIN EC PRIVATE KEY-----\nasdfasdf\n-----END EC PRIVATE KEY-----\n") - if privateKey != nil { - t.Fatalf("expected private key to be nil") - } - }) + var sink PrivateKey + assert.ErrorContains(t, sink.UnmarshalText(txt), "asn1") }) -} -func TestPrivateKey(t *testing.T) { - // generatePrivateKey is a helper function that generates a random private key - // and ignore any errors. 
- generatePrivateKey := func() *PrivateKey { - key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - privateKey := &PrivateKey{PrivateKey: key} - privateKey.marshalECPrivateKey = marshalECPrivateKey - return privateKey - } - - t.Run("MarshalText", func(t *testing.T) { - t.Run("plaintext", func(t *testing.T) { - t.Run("valid", func(t *testing.T) { - privateKey := generatePrivateKey() - - encoded, err := privateKey.MarshalText() - - if err != nil { - t.Fatalf("expected no error, actual: %s", err) - } - - block, _ := pem.Decode(encoded) - - if block.Type != pemPrivateKeyType { - t.Fatalf("expected pem type %q, actual %q", pemPrivateKeyType, block.Type) - } - - decodedKey, err := x509.ParseECPrivateKey(block.Bytes) - - if err != nil { - t.Fatalf("expected valid ECDSA key, got error: %s", err.Error()) - } - - if !privateKey.PrivateKey.Equal(decodedKey) { - t.Fatalf("expected private key to match pem encoded key") - } - }) - - t.Run("invalid", func(t *testing.T) { - t.Run("ec marshal function not set", func(t *testing.T) { - privateKey := generatePrivateKey() - privateKey.marshalECPrivateKey = nil - - _, err := privateKey.MarshalText() - - if !errors.Is(err, ErrFunctionNotImplemented) { - t.Fatalf("expected function not implemented error") - } - }) - - t.Run("cannot marshal elliptical curve key", func(t *testing.T) { - msg := "marshal failed" - privateKey := generatePrivateKey() - privateKey.marshalECPrivateKey = func(*ecdsa.PrivateKey) ([]byte, error) { - return []byte{}, errors.New(msg) - } - - _, err := privateKey.MarshalText() - - if err.Error() != msg { - t.Fatalf("expected error: %s", msg) - } - }) - }) - }) - }) + t.Run("ReadByOpenSSL", func(t *testing.T) { + openssl := require.OpenSSL(t) + dir := t.TempDir() + + keyFile := filepath.Join(dir, "key.pem") + keyBytes, err := key.MarshalText() + assert.NilError(t, err) + assert.NilError(t, os.WriteFile(keyFile, keyBytes, 0o600)) + + // The "openssl pkey" command processes public and private keys. 
+ cmd := exec.Command(openssl, "pkey", + "-in", keyFile, "-inform", "PEM", "-noout", "-text") + + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) + + assert.Assert(t, + bytes.Contains(output, []byte("Private-Key:")), + "expected valid private key, got:\n%s", output) + + t.Run("Check", func(t *testing.T) { + output, _ := exec.Command(openssl, "pkey", "-help").CombinedOutput() + if !strings.Contains(string(output), "-check") { + t.Skip(`requires "-check" flag`) + } + + cmd := exec.Command(openssl, "pkey", + "-check", "-in", keyFile, "-inform", "PEM", "-noout", "-text") + + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) - t.Run("UnmarshalText", func(t *testing.T) { - expected := generatePrivateKey() - - t.Run("plaintext", func(t *testing.T) { - t.Run("valid", func(t *testing.T) { - // manually marshal the private key - b, _ := x509.MarshalECPrivateKey(expected.PrivateKey) - encoded := pem.EncodeToMemory(&pem.Block{Bytes: b, Type: pemPrivateKeyType}) - pk := &PrivateKey{} - - if err := pk.UnmarshalText(encoded); err != nil { - t.Fatalf("expected no error, got %s", err.Error()) - } - - if !reflect.DeepEqual(expected.PrivateKey, pk.PrivateKey) { - t.Fatalf("expected encoded private key to be unmarshaled in identical format") - } - }) - - t.Run("invalid", func(t *testing.T) { - t.Run("not pem", func(t *testing.T) { - pk := &PrivateKey{} - - if err := pk.UnmarshalText([]byte("this is very invalid")); !errors.Is(err, ErrInvalidPEM) { - t.Fatalf("expected invalid PEM error") - } - }) - - t.Run("not labeled private key", func(t *testing.T) { - encoded := pem.EncodeToMemory(&pem.Block{Bytes: []byte("bad key"), Type: "CEREAL"}) - pk := &PrivateKey{} - - if err := pk.UnmarshalText(encoded); !errors.Is(err, ErrInvalidPEM) { - t.Fatalf("expected invalid PEM error") - } - }) - - t.Run("not a valid private key", func(t *testing.T) { - encoded := pem.EncodeToMemory(&pem.Block{Bytes: []byte("bad key"), Type: pemPrivateKeyType}) - pk := &PrivateKey{} - - if err := pk.UnmarshalText(encoded); !errors.Is(err, ErrInvalidPEM) { - t.Fatalf("expected invalid PEM error") - } - }) - }) + assert.Assert(t, + bytes.Contains(output, []byte("is valid")), + "expected valid private key, got:\n%s", output) }) }) } diff --git a/internal/pki/errors.go b/internal/pki/errors.go deleted file mode 100644 index 498050d72f..0000000000 --- a/internal/pki/errors.go +++ /dev/null @@ -1,34 +0,0 @@ -package pki - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -import "errors" - -var ( - // ErrFunctionNotImplemented is returned if a function that should be set on - // a struct is not set - ErrFunctionNotImplemented = errors.New("function not implemented") - - // ErrMissingRequired is returned if a required parameter is missing - ErrMissingRequired = errors.New("missing required parameter") - - // ErrInvalidCertificateAuthority is returned if a certficate authority (CA) - // has not been properly generated - ErrInvalidCertificateAuthority = errors.New("invalid certificate authority") - - // ErrInvalidPEM s returned if encoded data is not a valid PEM block - ErrInvalidPEM = errors.New("invalid pem encoded data") -) diff --git a/internal/pki/leaf.go b/internal/pki/leaf.go deleted file mode 100644 index a2894ec21f..0000000000 --- a/internal/pki/leaf.go +++ /dev/null @@ -1,233 +0,0 @@ -package pki - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -import ( - "context" - "crypto/ecdsa" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "fmt" - "math/big" - "net" - "time" - - "github.com/crunchydata/postgres-operator/internal/logging" -) - -const ( - // defaultCertificateExpiration sets the default expiration time for a leaf - // certificate - defaultCertificateExpiration = 365 * 24 * time.Hour -) - -// LeafCertificate contains the ability to generate the necessary components of -// a leaf certificate that can be used to identify a PostgreSQL cluster, or -// a pgBouncer instance, etc. A leaf certificate is signed by a root -// certificate authority. -type LeafCertificate struct { - // Certificate is the certificate of this certificate authority - Certificate *Certificate - - // CommonName represents the "common name" (CN) of the certificate. - CommonName string - - // DNSNames is a list of DNS names that are represented by this certificate. - DNSNames []string - - // IPAddresses is an optional list of IP addresses that can be represented by - // this certificate - IPAddresses []net.IP - - // PrivateKey is the private key portion of the leaf certificate - PrivateKey *PrivateKey - - // generateKey generates an ECDSA keypair - generateKey func() (*ecdsa.PrivateKey, error) - - // generateCertificate generates a X509 certificate return in DER format - generateCertificate func(*ecdsa.PrivateKey, *big.Int, *RootCertificateAuthority, string, []string, []net.IP) ([]byte, error) - - // generateSerialNumber creates a unique serial number to assign to the - // certificate - generateSerialNumber func() (*big.Int, error) -} - -// Generate creates a new leaf certificate! 
-func (c *LeafCertificate) Generate(rootCA *RootCertificateAuthority) error { - // ensure functions are defined - if c.generateKey == nil || c.generateCertificate == nil || c.generateSerialNumber == nil { - return ErrFunctionNotImplemented - } - - // ensure there is a Common NAme - if c.CommonName == "" { - return fmt.Errorf("%w: common name is required", ErrMissingRequired) - } - - // generate a private key - privateKey, err := c.generateKey() - - if err != nil { - return err - } - - c.PrivateKey = NewPrivateKey(privateKey) - - // generate a serial number - serialNumber, err := c.generateSerialNumber() - - if err != nil { - return err - } - - // generate a certificate - certificate, err := c.generateCertificate(c.PrivateKey.PrivateKey, - serialNumber, rootCA, c.CommonName, c.DNSNames, c.IPAddresses) - - if err != nil { - return err - } - - c.Certificate = &Certificate{Certificate: certificate} - - return nil -} - -// LeafCertIsBad checks at least one leaf cert has been generated, the basic constraints -// are valid and it has been verified with the root certpool -// -// TODO(tjmoore4): Currently this will return 'true' if any of the parsed certs -// fail a given check. For scenarios where multiple certs may be returned, such -// as in a BYOC/BYOCA, this will need to be handled so we only generate a new -// certificate for our cert if it is the one that fails. -func LeafCertIsBad( - ctx context.Context, leaf *LeafCertificate, rootCertCA *RootCertificateAuthority, - namespace string, -) bool { - log := logging.FromContext(ctx) - - // if the certificate or the private key are nil, the leaf cert is bad - if leaf.Certificate == nil || leaf.PrivateKey == nil { - return true - } - - // set up root cert pool for leaf cert verification - var rootCerts []*x509.Certificate - var rootErr error - - // set up root cert pool - roots := x509.NewCertPool() - - // if there is an error parsing the root certificate or if there is not at least one certificate, - // the RootCertificateAuthority is bad - if rootCerts, rootErr = x509.ParseCertificates(rootCertCA.Certificate.Certificate); rootErr != nil && len(rootCerts) < 1 { - return true - } - - // add all the root certs returned to the root pool - for _, cert := range rootCerts { - roots.AddCert(cert) - } - - var leafCerts []*x509.Certificate - var leafErr error - // if there is an error parsing the leaf certificate or if the number of certificates - // returned is not one, the certificate is bad - if leafCerts, leafErr = x509.ParseCertificates(leaf.Certificate.Certificate); leafErr != nil && len(leafCerts) < 1 { - return true - } - - // go through the returned leaf certs and check - // that they are not CAs and Verify them - for _, cert := range leafCerts { - // a leaf cert is bad if it is a CA, or if - // the MaxPathLen or MaxPathLenZero are invalid - if !cert.BasicConstraintsValid { - return true - } - - // verify leaf cert - _, verifyError := cert.Verify(x509.VerifyOptions{ - DNSName: cert.DNSNames[0], - Roots: roots, - }) - //log verify error if not nil - if verifyError != nil { - log.Error(verifyError, "verify failed for leaf cert") - return true - } - } - - // finally, if no check failed, return false - return false -} - -// NewLeafCertificate generates a new leaf certificate that can be used for the -// identity of a particular instance -// -// Accepts arguments for the common name (CN), the DNS names and the IP -// Addresses that will be represented by this certificate -func NewLeafCertificate(commonName string, dnsNames []string, ipAddresses 
[]net.IP) *LeafCertificate { - return &LeafCertificate{ - CommonName: commonName, - DNSNames: dnsNames, - IPAddresses: ipAddresses, - generateCertificate: generateLeafCertificate, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } -} - -// generateLeafCertificate creates a x509 certificate with a ECDSA -// signature using the SHA-384 algorithm -func generateLeafCertificate(privateKey *ecdsa.PrivateKey, serialNumber *big.Int, - rootCA *RootCertificateAuthority, commonName string, dnsNames []string, ipAddresses []net.IP) ([]byte, error) { - // first, ensure that the root certificate can be turned into a x509 - // Certificate object so it can be used as the parent certificate when - // generating - if rootCA == nil || rootCA.Certificate == nil || rootCA.PrivateKey == nil { - return nil, fmt.Errorf("%w: root certificate authority needs to be generated", - ErrInvalidCertificateAuthority) - } - - parent, err := x509.ParseCertificate(rootCA.Certificate.Certificate) - - if err != nil { - return nil, err - } - - // prepare the certificate. set the validity time to the predefined range - now := time.Now() - template := &x509.Certificate{ - BasicConstraintsValid: true, - DNSNames: dnsNames, - IPAddresses: ipAddresses, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, - NotBefore: now.Add(beforeInterval), - NotAfter: now.Add(defaultCertificateExpiration), - SerialNumber: serialNumber, - SignatureAlgorithm: certificateSignatureAlgorithm, - Subject: pkix.Name{ - CommonName: commonName, - }, - } - - // create the leaf certificate and sign it using the root CA - return x509.CreateCertificate(rand.Reader, template, parent, - privateKey.Public(), rootCA.PrivateKey.PrivateKey) -} diff --git a/internal/pki/leaf_test.go b/internal/pki/leaf_test.go deleted file mode 100644 index ed9622072b..0000000000 --- a/internal/pki/leaf_test.go +++ /dev/null @@ -1,517 +0,0 @@ -package pki - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -import ( - "context" - "crypto/ecdsa" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "errors" - "fmt" - "math/big" - "net" - "reflect" - "testing" - "time" - - "gotest.tools/v3/assert" -) - -func TestLeafCertificate(t *testing.T) { - t.Run("Generate", func(t *testing.T) { - namespace := "pgo-test" - commonName := "hippo." + namespace - dnsNames := []string{commonName, "hippo." 
+ namespace + ".svc"} - ipAddresses := []net.IP{net.ParseIP("127.0.0.1")} - // run generate on rootCA to ensure it sets valid values - rootCA := NewRootCertificateAuthority() - if err := rootCA.Generate(); err != nil { - t.Fatalf("root certificate authority could not be generated") - } - - // see if certificate can be parsed - x509RootCA, err := x509.ParseCertificate(rootCA.Certificate.Certificate) - if err != nil { - t.Fatalf("expected valid x509 root certificate, actual %s", err.Error()) - } - - t.Run("valid", func(t *testing.T) { - cert := &LeafCertificate{ - CommonName: commonName, - DNSNames: dnsNames, - IPAddresses: ipAddresses, - generateCertificate: generateLeafCertificate, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } - - // run generate to ensure it sets valid values - if err := cert.Generate(rootCA); err != nil { - t.Fatalf("expected generate to return no errors, got: %s", err.Error()) - } - - // ensure private key and certificate are set - if cert.PrivateKey == nil { - t.Fatalf("expected private key to be set") - } - - if cert.Certificate == nil { - t.Fatalf("expected certificate to be set") - } - - if cert.PrivateKey.PrivateKey == nil { - t.Fatalf("expected private key to be set, got nil") - } - - if len(cert.Certificate.Certificate) == 0 { - t.Fatalf("expected certificate to be generated") - } - - x509Certificate, err := x509.ParseCertificate(cert.Certificate.Certificate) - if err != nil { - t.Fatalf("expected valid x509 ceriticate, actual %s", err.Error()) - } - - if !cert.PrivateKey.PrivateKey.PublicKey.Equal(x509Certificate.PublicKey) { - t.Fatalf("expected public key from stored key to match public key on certificate") - } - - // check certain attributes - if x509Certificate.IsCA { - t.Fatalf("expected certificate to be a leaf certificate") - } - - if x509Certificate.Issuer.CommonName != x509RootCA.Subject.CommonName { - t.Fatalf("expected issuer common name to be %s, actual %s", - x509RootCA.Subject.CommonName, x509Certificate.Issuer.CommonName) - } - - if x509Certificate.Subject.CommonName != commonName { - t.Fatalf("expected subject name to be %s, actual %s", commonName, x509Certificate.Subject.CommonName) - } - - if !reflect.DeepEqual(x509Certificate.DNSNames, dnsNames) { - t.Fatalf("expected SAN DNS names to be %v, actual %v", dnsNames, x509Certificate.DNSNames) - } - - // check IP addresses...inefficiently, as we cannot use a DeepEqual on - // net.IP slices. 
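The removed test compared SAN IP addresses pairwise because, as its comment notes, reflect.DeepEqual is unreliable for net.IP values: the same address can be held in a 4-byte or a 16-byte slice. A small standalone illustration of why net.IP.Equal is the right comparison:

package main

import (
    "fmt"
    "net"
    "reflect"
)

func main() {
    // The same loopback address in its two internal representations.
    four := net.IPv4(127, 0, 0, 1).To4() // 4-byte form
    sixteen := net.ParseIP("127.0.0.1")  // 16-byte (IPv4-in-IPv6) form

    fmt.Println(reflect.DeepEqual(four, sixteen)) // false: different byte slices
    fmt.Println(four.Equal(sixteen))              // true: same address
}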
- if len(x509Certificate.IPAddresses) != len(ipAddresses) { - t.Fatalf("expected SAN IP addresses to be &v, actual &v") - } - - for _, ip := range x509Certificate.IPAddresses { - ok := false - for _, knownIP := range ipAddresses { - ok = ok || (ip.Equal(knownIP)) - } - - if !ok { - t.Fatalf("expected SAN IP addresses to be %v, actual %v", ipAddresses, x509Certificate.IPAddresses) - } - } - - // ensure private key functions are set - assertConstructed(t, cert.PrivateKey) - }) - - t.Run("invalid", func(t *testing.T) { - t.Run("generate certificate not set", func(t *testing.T) { - cert := &LeafCertificate{ - CommonName: commonName, - } - cert.generateCertificate = nil - cert.generateKey = generateKey - cert.generateSerialNumber = generateSerialNumber - - if err := cert.Generate(rootCA); !errors.Is(err, ErrFunctionNotImplemented) { - t.Fatalf("expected function not implemented error") - } - }) - - t.Run("generate key not set", func(t *testing.T) { - cert := &LeafCertificate{ - CommonName: commonName, - } - cert.generateCertificate = generateLeafCertificate - cert.generateKey = nil - cert.generateSerialNumber = generateSerialNumber - - if err := cert.Generate(rootCA); !errors.Is(err, ErrFunctionNotImplemented) { - t.Fatalf("expected function not implemented error") - } - }) - - t.Run("generate serial number not set", func(t *testing.T) { - cert := &LeafCertificate{ - CommonName: commonName, - } - cert.generateCertificate = generateLeafCertificate - cert.generateKey = generateKey - cert.generateSerialNumber = nil - - if err := cert.Generate(rootCA); !errors.Is(err, ErrFunctionNotImplemented) { - t.Fatalf("expected function not implemented error") - } - }) - - t.Run("CommonName not set", func(t *testing.T) { - cert := &LeafCertificate{ - generateCertificate: generateLeafCertificate, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } - - if err := cert.Generate(rootCA); !errors.Is(err, ErrMissingRequired) { - t.Fatalf("expected missing required error") - } - }) - - t.Run("root certificate authority is nil", func(t *testing.T) { - cert := &LeafCertificate{ - CommonName: commonName, - } - cert.generateCertificate = generateLeafCertificate - cert.generateKey = generateKey - cert.generateSerialNumber = generateSerialNumber - - if err := cert.Generate(nil); !errors.Is(err, ErrInvalidCertificateAuthority) { - t.Log(err) - } - }) - - t.Run("root certificate authority has no private key", func(t *testing.T) { - cert := &LeafCertificate{ - CommonName: commonName, - generateCertificate: generateLeafCertificate, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } - rootCA := NewRootCertificateAuthority() - rootCA.PrivateKey = nil - - if err := cert.Generate(rootCA); !errors.Is(err, ErrInvalidCertificateAuthority) { - t.Fatalf("expected invalid certificate authority") - } - }) - - t.Run("root certificate authority has no certificate", func(t *testing.T) { - cert := &LeafCertificate{ - CommonName: commonName, - generateCertificate: generateLeafCertificate, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } - rootCA := NewRootCertificateAuthority() - if err := rootCA.Generate(); err != nil { - t.Fatalf("root certificate authority could not be generated") - } - rootCA.Certificate = nil - - if err := cert.Generate(rootCA); !errors.Is(err, ErrInvalidCertificateAuthority) { - t.Fatalf("expected invalid certificate authority") - } - }) - - t.Run("root certificate authority has invalid certificate", func(t *testing.T) { - cert := 
&LeafCertificate{ - CommonName: commonName, - generateCertificate: generateLeafCertificate, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } - rootCA := NewRootCertificateAuthority() - if err := rootCA.Generate(); err != nil { - t.Fatalf("root certificate authority could not be generated") - } - rootCA.Certificate.Certificate = []byte{} - - if err := cert.Generate(rootCA); err == nil { - t.Fatalf("expected certificate parsing error") - } - }) - - t.Run("cannot generate private key", func(t *testing.T) { - msg := "cannot generate private key" - cert := &LeafCertificate{ - CommonName: commonName, - generateCertificate: generateLeafCertificate, - generateKey: func() (*ecdsa.PrivateKey, error) { return nil, errors.New(msg) }, - generateSerialNumber: generateSerialNumber, - } - - if err := cert.Generate(rootCA); err.Error() != msg { - t.Fatalf("expected error: %s", msg) - } - }) - - t.Run("cannot generate serial number", func(t *testing.T) { - msg := "cannot generate serial number" - cert := &LeafCertificate{ - CommonName: commonName, - generateCertificate: generateLeafCertificate, - generateKey: generateKey, - generateSerialNumber: func() (*big.Int, error) { return nil, errors.New(msg) }, - } - - if err := cert.Generate(rootCA); err.Error() != msg { - t.Fatalf("expected error: %s", msg) - } - }) - - t.Run("cannot generate certificate", func(t *testing.T) { - msg := "cannot generate certificate" - cert := &LeafCertificate{ - CommonName: commonName, - generateCertificate: func(*ecdsa.PrivateKey, *big.Int, *RootCertificateAuthority, string, []string, []net.IP) ([]byte, error) { - return nil, errors.New(msg) - }, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } - - if err := cert.Generate(rootCA); err.Error() != msg { - t.Fatalf("expected error: %s", msg) - } - }) - }) - }) -} - -func TestNewLeafCertificate(t *testing.T) { - namespace := "pgo-test" - commonName := "hippo." + namespace - dnsNames := []string{commonName} - cert := NewLeafCertificate(commonName, dnsNames, []net.IP{}) - - if cert.CommonName != commonName { - t.Fatalf("expected commonName to be %s, actual %s", commonName, cert.CommonName) - } - - if !reflect.DeepEqual(cert.DNSNames, dnsNames) { - t.Fatalf("expected dnsNames to be %v, actual %v", dnsNames, cert.DNSNames) - } - - if cert.generateCertificate == nil { - t.Fatalf("expected generateCertificate to be set, got nil") - } - - if cert.generateKey == nil { - t.Fatalf("expected generateKey to be set, got nil") - } - - if cert.generateSerialNumber == nil { - t.Fatalf("expected generateSerialNumber to be set, got nil") - } - - // run generate to ensure it sets valid values...which means - // generating a root certificate - rootCA := NewRootCertificateAuthority() - if err := rootCA.Generate(); err != nil { - t.Fatalf("root certificate authority could not be generated") - } - - // ok...let's see if this works - if err := cert.Generate(rootCA); err != nil { - t.Fatalf("expected generate to return no errors, got: %s", err.Error()) - } - - // ensure private key and certificate are set - if cert.PrivateKey == nil { - t.Fatalf("expected private key to be set") - } - - if cert.Certificate == nil { - t.Fatalf("expected certificate to be set") - } -} - -func TestLeafCertIsBad(t *testing.T) { - ctx := context.Background() - testRoot, err := newTestRoot() - assert.NilError(t, err) - - namespace := "pgo-test" - commonName := "hippo." + namespace - dnsNames := []string{commonName, "hippo." 
+ namespace + ".svc"} - ipAddresses := []net.IP{net.ParseIP("127.0.0.1")} - - testLeaf := &LeafCertificate{ - CommonName: commonName, - DNSNames: dnsNames, - IPAddresses: ipAddresses, - generateCertificate: generateLeafCertificate, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } - - // run generate to ensure it sets valid values - err = testLeaf.Generate(testRoot) - assert.NilError(t, err) - - t.Run("leaf cert is good", func(t *testing.T) { - - assert.Assert(t, !LeafCertIsBad(ctx, testLeaf, testRoot, namespace)) - }) - - t.Run("leaf cert is empty", func(t *testing.T) { - - emptyLeaf := &LeafCertificate{} - assert.Assert(t, LeafCertIsBad(ctx, emptyLeaf, testRoot, namespace)) - }) - - t.Run("error parsing root certificate", func(t *testing.T) { - testRoot.Certificate = &Certificate{ - Certificate: []byte("notacert"), - } - - assert.Assert(t, LeafCertIsBad(ctx, testLeaf, testRoot, namespace)) - }) - - t.Run("error parsing leaf certificate", func(t *testing.T) { - - testRoot2, err := newTestRoot() - assert.NilError(t, err) - - testLeaf.Certificate = &Certificate{ - Certificate: []byte("notacert"), - } - - assert.Assert(t, LeafCertIsBad(ctx, testLeaf, testRoot2, namespace)) - }) - - t.Run("leaf with invalid constraint", func(t *testing.T) { - - testRoot3, err := newTestRoot() - assert.NilError(t, err) - - badLeaf := &LeafCertificate{ - CommonName: commonName, - DNSNames: dnsNames, - IPAddresses: ipAddresses, - generateCertificate: generateLeafCertificateInvalidConstraint, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } - - // run generate to ensure it sets valid values - err = badLeaf.Generate(testRoot3) - assert.NilError(t, err) - - assert.Assert(t, LeafCertIsBad(ctx, badLeaf, testRoot3, namespace)) - - }) - - t.Run("leaf is a expired", func(t *testing.T) { - - testRoot3, err := newTestRoot() - assert.NilError(t, err) - - badLeaf := &LeafCertificate{ - CommonName: commonName, - DNSNames: dnsNames, - IPAddresses: ipAddresses, - generateCertificate: generateLeafCertificateExpired, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } - - // run generate to ensure it sets valid values - err = badLeaf.Generate(testRoot3) - assert.NilError(t, err) - - assert.Assert(t, LeafCertIsBad(ctx, badLeaf, testRoot3, namespace)) - - }) -} - -// generateLeafCertificateInvalidConstraint creates a x509 certificate with BasicConstraintsValid set to false -func generateLeafCertificateInvalidConstraint(privateKey *ecdsa.PrivateKey, serialNumber *big.Int, - rootCA *RootCertificateAuthority, commonName string, dnsNames []string, ipAddresses []net.IP) ([]byte, error) { - // first, ensure that the root certificate can be turned into a x509 - // Certificate object so it can be used as the parent certificate when - // generating - if rootCA == nil || rootCA.Certificate == nil || rootCA.PrivateKey == nil { - return nil, fmt.Errorf("%w: root certificate authority needs to be generated", - ErrInvalidCertificateAuthority) - } - - parent, err := x509.ParseCertificate(rootCA.Certificate.Certificate) - - if err != nil { - return nil, err - } - - // prepare the certificate. 
set the validity time to the predefined range - now := time.Now() - template := &x509.Certificate{ - BasicConstraintsValid: false, - DNSNames: dnsNames, - IPAddresses: ipAddresses, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, - NotBefore: now.Add(beforeInterval), - NotAfter: now.Add(defaultCertificateExpiration), - SerialNumber: serialNumber, - SignatureAlgorithm: certificateSignatureAlgorithm, - Subject: pkix.Name{ - CommonName: commonName, - }, - } - - // create the leaf certificate and sign it using the root CA - return x509.CreateCertificate(rand.Reader, template, parent, - privateKey.Public(), rootCA.PrivateKey.PrivateKey) -} - -// generateLeafCertificateExpired creates a x509 certificate that is expired -func generateLeafCertificateExpired(privateKey *ecdsa.PrivateKey, serialNumber *big.Int, - rootCA *RootCertificateAuthority, commonName string, dnsNames []string, ipAddresses []net.IP) ([]byte, error) { - // first, ensure that the root certificate can be turned into a x509 - // Certificate object so it can be used as the parent certificate when - // generating - if rootCA == nil || rootCA.Certificate == nil || rootCA.PrivateKey == nil { - return nil, fmt.Errorf("%w: root certificate authority needs to be generated", - ErrInvalidCertificateAuthority) - } - - parent, err := x509.ParseCertificate(rootCA.Certificate.Certificate) - - if err != nil { - return nil, err - } - - // prepare the certificate. set the validity time to the predefined range - now := time.Now() - template := &x509.Certificate{ - BasicConstraintsValid: true, - DNSNames: dnsNames, - IPAddresses: ipAddresses, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, - NotBefore: now.Add(beforeInterval), - NotAfter: now.Add(beforeInterval), // not after an hour ago, i.e. expired - SerialNumber: serialNumber, - SignatureAlgorithm: certificateSignatureAlgorithm, - Subject: pkix.Name{ - CommonName: commonName, - }, - } - - // create the leaf certificate and sign it using the root CA - return x509.CreateCertificate(rand.Reader, template, parent, - privateKey.Public(), rootCA.PrivateKey.PrivateKey) -} diff --git a/internal/pki/pki.go b/internal/pki/pki.go new file mode 100644 index 0000000000..7048810654 --- /dev/null +++ b/internal/pki/pki.go @@ -0,0 +1,220 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pki + +import ( + "crypto/ecdsa" + "crypto/x509" + "math/big" + "time" +) + +const renewalRatio = 3 + +// Certificate represents an X.509 certificate that conforms to the Internet +// PKI Profile, RFC 5280. +type Certificate struct{ x509 *x509.Certificate } + +// PrivateKey represents the private key of a Certificate. +type PrivateKey struct{ ecdsa *ecdsa.PrivateKey } + +// Equal reports whether c and other have the same value. +func (c Certificate) Equal(other Certificate) bool { + return c.x509.Equal(other.x509) +} + +// CommonName returns a copy of the certificate common name (ASN.1 OID 2.5.4.3). +func (c Certificate) CommonName() string { + if c.x509 == nil { + return "" + } + return c.x509.Subject.CommonName +} + +// DNSNames returns a copy of the certificate subject alternative names +// (ASN.1 OID 2.5.29.17) that are DNS names. +func (c Certificate) DNSNames() []string { + if c.x509 == nil || len(c.x509.DNSNames) == 0 { + return nil + } + return append([]string{}, c.x509.DNSNames...) +} + +// hasSubject checks that c has these values in its subject. 
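The new DNSNames accessor returns append([]string{}, ...) rather than handing out the parsed slice, so callers cannot mutate the certificate's data through the return value. A toy sketch of that defensive-copy choice, using a made-up names type rather than the real Certificate:

package main

import "fmt"

// names mimics the shape of Certificate.DNSNames: the accessor returns a copy.
type names struct{ dns []string }

func (n names) DNSNames() []string {
    if len(n.dns) == 0 {
        return nil
    }
    return append([]string{}, n.dns...)
}

func main() {
    n := names{dns: []string{"hippo", "hippo.svc"}}

    got := n.DNSNames()
    got[0] = "mutated" // only the copy changes

    fmt.Println(n.dns[0]) // still "hippo"
}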
+func (c Certificate) hasSubject(commonName string, dnsNames []string) bool { + ok := c.x509 != nil && + c.x509.Subject.CommonName == commonName && + len(c.x509.DNSNames) == len(dnsNames) + + for i := range dnsNames { + ok = ok && c.x509.DNSNames[i] == dnsNames[i] + } + + return ok +} + +// Equal reports whether k and other have the same value. +func (k PrivateKey) Equal(other PrivateKey) bool { + if k.ecdsa == nil || other.ecdsa == nil { + return k.ecdsa == other.ecdsa + } + return k.ecdsa.Equal(other.ecdsa) +} + +// LeafCertificate is a certificate and private key pair that can be validated +// by RootCertificateAuthority. +type LeafCertificate struct { + Certificate Certificate + PrivateKey PrivateKey +} + +// RootCertificateAuthority is a certificate and private key pair that can +// generate other certificates. +type RootCertificateAuthority struct { + Certificate Certificate + PrivateKey PrivateKey +} + +// NewRootCertificateAuthority generates a new key and self-signed certificate +// for issuing other certificates. +func NewRootCertificateAuthority() (*RootCertificateAuthority, error) { + var root RootCertificateAuthority + var serial *big.Int + + key, err := generateKey() + if err == nil { + serial, err = generateSerialNumber() + } + if err == nil { + root.PrivateKey.ecdsa = key + root.Certificate.x509, err = generateRootCertificate(key, serial) + } + + return &root, err +} + +// RootIsValid checks if root is valid according to this package's policies. +func RootIsValid(root *RootCertificateAuthority) bool { + if root == nil || root.Certificate.x509 == nil { + return false + } + + trusted := x509.NewCertPool() + trusted.AddCert(root.Certificate.x509) + + // Verify the certificate expiration, basic constraints, key usages, and + // critical extensions. Trust the certificate as an authority so it is not + // compared to system roots or sent to the platform certificate verifier. + _, err := root.Certificate.x509.Verify(x509.VerifyOptions{ + Roots: trusted, + }) + + // Its expiration, key usages, and critical extensions are good. + ok := err == nil + + // It is an authority with the Subject Key Identifier extension. + // The "crypto/x509" package adds the extension automatically since Go 1.15. + // - https://tools.ietf.org/html/rfc5280#section-4.2.1.2 + // - https://go.dev/doc/go1.15#crypto/x509 + ok = ok && + root.Certificate.x509.BasicConstraintsValid && + root.Certificate.x509.IsCA && + len(root.Certificate.x509.SubjectKeyId) > 0 + + // It is signed by this private key. + ok = ok && + root.PrivateKey.ecdsa != nil && + root.PrivateKey.ecdsa.PublicKey.Equal(root.Certificate.x509.PublicKey) + + return ok +} + +// GenerateLeafCertificate generates a new key and certificate signed by root. +func (root *RootCertificateAuthority) GenerateLeafCertificate( + commonName string, dnsNames []string, +) (*LeafCertificate, error) { + var leaf LeafCertificate + var serial *big.Int + + key, err := generateKey() + if err == nil { + serial, err = generateSerialNumber() + } + if err == nil { + leaf.PrivateKey.ecdsa = key + leaf.Certificate.x509, err = generateLeafCertificate( + root.Certificate.x509, root.PrivateKey.ecdsa, &key.PublicKey, serial, + commonName, dnsNames) + } + + return &leaf, err +} + +// leafIsValid checks if leaf is valid according to this package's policies and +// is signed by root. 
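Both RootIsValid above and leafIsValid just below validate a certificate by building a one-entry x509.CertPool and calling Verify, which also covers expiration, key usages, and critical extensions. Reduced to the standard library alone, the pattern looks roughly like this sketch (not code from the diff; the function name is illustrative):

package example

import "crypto/x509"

// verifiedByRoot trusts exactly one certificate as a root and asks the leaf
// to chain to it; Verify rejects expired certificates, bad key usages, and
// unknown critical extensions along the way.
func verifiedByRoot(root, leaf *x509.Certificate) bool {
    trusted := x509.NewCertPool()
    trusted.AddCert(root)

    _, err := leaf.Verify(x509.VerifyOptions{Roots: trusted})
    return err == nil
}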
+func (root *RootCertificateAuthority) leafIsValid(leaf *LeafCertificate) bool { + if root == nil || root.Certificate.x509 == nil { + return false + } + if leaf == nil || leaf.Certificate.x509 == nil { + return false + } + + trusted := x509.NewCertPool() + trusted.AddCert(root.Certificate.x509) + + // Go 1.10 enforces name constraints for all names in the certificate. + // Go 1.15 does not enforce name constraints on the CommonName field. + // - https://go.dev/doc/go1.10#crypto/x509 + // - https://go.dev/doc/go1.15#commonname + _, err := leaf.Certificate.x509.Verify(x509.VerifyOptions{ + Roots: trusted, + }) + + // Its expiration, name constraints, key usages, and critical extensions are good. + ok := err == nil + + // It is not an authority. + ok = ok && + leaf.Certificate.x509.BasicConstraintsValid && + !leaf.Certificate.x509.IsCA + + // It is signed by this private key. + ok = ok && + leaf.PrivateKey.ecdsa != nil && + leaf.PrivateKey.ecdsa.PublicKey.Equal(leaf.Certificate.x509.PublicKey) + + // It is not yet past the "renewal by" time, + // as defined by the before and after times of the certificate's expiration + // and the default ratio + ok = ok && isBeforeRenewalTime(leaf.Certificate.x509.NotBefore, + leaf.Certificate.x509.NotAfter) + + return ok +} + +// isBeforeRenewalTime checks if the result of `currentTime` +// is after the default renewal time of +// 1/3rds before the certificate's expiry +func isBeforeRenewalTime(before, after time.Time) bool { + renewalDuration := after.Sub(before) / renewalRatio + renewalTime := after.Add(-1 * renewalDuration) + return currentTime().Before(renewalTime) +} + +// RegenerateLeafWhenNecessary returns leaf when it is valid according to this +// package's policies, signed by root, and has commonName and dnsNames in its +// subject. Otherwise, it returns a new key and certificate signed by root. +func (root *RootCertificateAuthority) RegenerateLeafWhenNecessary( + leaf *LeafCertificate, commonName string, dnsNames []string, +) (*LeafCertificate, error) { + ok := root.leafIsValid(leaf) && + leaf.Certificate.hasSubject(commonName, dnsNames) + + if ok { + return leaf, nil + } + return root.GenerateLeafCertificate(commonName, dnsNames) +} diff --git a/internal/pki/pki_test.go b/internal/pki/pki_test.go index 174784f2ba..cd13896450 100644 --- a/internal/pki/pki_test.go +++ b/internal/pki/pki_test.go @@ -1,110 +1,448 @@ -package pki - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
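isBeforeRenewalTime and renewalRatio = 3 mean a certificate is treated as due for renewal once only a third of its validity period remains; for a leaf valid for roughly a year, that is about the last four months. A small worked example of the arithmetic, illustrative only and using its own dates rather than the package's exact leaf validity window:

package main

import (
    "fmt"
    "time"
)

func main() {
    // A certificate valid for exactly 365 days.
    notBefore := time.Date(2023, time.January, 1, 0, 0, 0, 0, time.UTC)
    notAfter := notBefore.Add(365 * 24 * time.Hour)

    // One third of the validity period, measured back from expiry.
    renewalDuration := notAfter.Sub(notBefore) / 3
    renewalTime := notAfter.Add(-renewalDuration)

    // 8760h / 3 = 2920h, i.e. renewal starts about 121.7 days before expiry.
    fmt.Println(notAfter.Sub(renewalTime)) // 2920h0m0s
    fmt.Println(renewalTime)
}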
-*/ +package pki import ( + "crypto/ecdsa" "crypto/x509" - "io/ioutil" - "net" + "os" "os/exec" "path/filepath" "strings" "testing" + "time" "gotest.tools/v3/assert" + + "github.com/crunchydata/postgres-operator/internal/testing/require" ) -// TestPKI does a full test of generating a valid certificate chain -func TestPKI(t *testing.T) { - // generate the root CA - rootCA := NewRootCertificateAuthority() - if err := rootCA.Generate(); err != nil { - t.Fatalf("root certificate authority could not be generated") - } +type StringSet map[string]struct{} - // generate the leaf CA - namespace := "pgo-test" - commonName := "hippo." + namespace - dnsNames := []string{commonName} - cert := NewLeafCertificate(commonName, dnsNames, []net.IP{}) - if err := cert.Generate(rootCA); err != nil { - t.Fatalf("leaf certificate could not be generated") - } +func (s StringSet) Has(item string) bool { _, ok := s[item]; return ok } +func (s StringSet) Insert(item string) { s[item] = struct{}{} } - // OK, test if we can verify the validity of the leaf certificate - rootCertificate, err := x509.ParseCertificate(rootCA.Certificate.Certificate) - if err != nil { - t.Fatalf("could not parse root certificate: %s", err.Error()) - } +func TestCertificateCommonName(t *testing.T) { + zero := Certificate{} + assert.Assert(t, zero.CommonName() == "") +} - certificate, err := x509.ParseCertificate(cert.Certificate.Certificate) - if err != nil { - t.Fatalf("could not parse leaf certificate: %s", err.Error()) - } +func TestCertificateDNSNames(t *testing.T) { + zero := Certificate{} + assert.Assert(t, zero.DNSNames() == nil) +} - opts := x509.VerifyOptions{ - DNSName: commonName, - Roots: x509.NewCertPool(), - } - opts.Roots.AddCert(rootCertificate) +func TestCertificateHasSubject(t *testing.T) { + zero := Certificate{} - if _, err := certificate.Verify(opts); err != nil { - t.Fatalf("could not verify certificate: %s", err.Error()) + // The zero value has no subject. + for _, cn := range []string{"", "any"} { + for _, dns := range [][]string{nil, {}, {"any"}} { + assert.Assert(t, !zero.hasSubject(cn, dns), "for (%q, %q)", cn, dns) + } } } -func TestPKIOpenSSL(t *testing.T) { - openssl, err := exec.LookPath("openssl") - if err != nil { - t.Skip(`requires "openssl" executable`) - } else { - output, err := exec.Command(openssl, "version", "-a").CombinedOutput() +func TestCertificateEqual(t *testing.T) { + zero := Certificate{} + assert.Assert(t, zero.Equal(zero)) + + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) + assert.Assert(t, root.Certificate.Equal(root.Certificate)) + + assert.Assert(t, !root.Certificate.Equal(zero)) + assert.Assert(t, !zero.Equal(root.Certificate)) + + other, err := NewRootCertificateAuthority() + assert.NilError(t, err) + assert.Assert(t, !root.Certificate.Equal(other.Certificate)) + + // DeepEqual calls the Equal method, so no cmp.Option are necessary. + assert.DeepEqual(t, zero, zero) + assert.DeepEqual(t, root.Certificate, root.Certificate) +} + +func TestPrivateKeyEqual(t *testing.T) { + zero := PrivateKey{} + assert.Assert(t, zero.Equal(zero)) + + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) + assert.Assert(t, root.PrivateKey.Equal(root.PrivateKey)) + + assert.Assert(t, !root.PrivateKey.Equal(zero)) + assert.Assert(t, !zero.Equal(root.PrivateKey)) + + other, err := NewRootCertificateAuthority() + assert.NilError(t, err) + assert.Assert(t, !root.PrivateKey.Equal(other.PrivateKey)) + + // DeepEqual calls the Equal method, so no cmp.Option are necessary. 
+ assert.DeepEqual(t, zero, zero) + assert.DeepEqual(t, root.PrivateKey, root.PrivateKey) +} + +func TestRootCertificateAuthority(t *testing.T) { + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) + assert.Assert(t, root != nil) + + cert := root.Certificate.x509 + assert.Assert(t, RootIsValid(root), "got %#v", cert) + + assert.DeepEqual(t, cert.Issuer, cert.Subject) // self-signed + assert.Assert(t, cert.BasicConstraintsValid && cert.IsCA) // authority + assert.Assert(t, time.Now().After(cert.NotBefore), "early, got %v", cert.NotBefore) + assert.Assert(t, time.Now().Before(cert.NotAfter), "expired, got %v", cert.NotAfter) + + assert.Equal(t, cert.MaxPathLen, 0) + assert.Equal(t, cert.PublicKeyAlgorithm, x509.ECDSA) + assert.Equal(t, cert.SignatureAlgorithm, x509.ECDSAWithSHA384) + assert.Equal(t, cert.Subject.CommonName, "postgres-operator-ca") + assert.Equal(t, cert.KeyUsage, x509.KeyUsageCertSign|x509.KeyUsageCRLSign) + + assert.Assert(t, cert.DNSNames == nil) + assert.Assert(t, cert.EmailAddresses == nil) + assert.Assert(t, cert.IPAddresses == nil) + assert.Assert(t, cert.URIs == nil) + + // The Subject Key Identifier extension is necessary on CAs. + // The "crypto/x509" package adds it automatically since Go 1.15. + // - https://tools.ietf.org/html/rfc5280#section-4.2.1.2 + // - https://go.dev/doc/go1.15#crypto/x509 + assert.Assert(t, len(cert.SubjectKeyId) > 0) + + // The Subject field must be populated on CAs. + // - https://tools.ietf.org/html/rfc5280#section-4.1.2.6 + assert.Assert(t, len(cert.Subject.Names) > 0) + + root2, err := NewRootCertificateAuthority() + assert.NilError(t, err) + assert.Assert(t, root2 != nil) + + cert2 := root2.Certificate.x509 + assert.Assert(t, RootIsValid(root2), "got %#v", cert2) + + assert.Assert(t, cert2.SerialNumber.Cmp(cert.SerialNumber) != 0, "new serial") + assert.Assert(t, !cert2.PublicKey.(*ecdsa.PublicKey).Equal(cert.PublicKey), "new key") + + // The root certificate cannot be verified independently by OpenSSL because + // it is self-signed. OpenSSL does perform some checks when it is part of + // a proper chain in [TestLeafCertificate]. 
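// The properties asserted in this test are plain fields on x509.Certificate, so
// the same checks can be run against any PEM-encoded CA outside the test suite.
// A rough sketch using only the standard library; the "root.crt" path is
// hypothetical and error handling is kept minimal.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	pemBytes, err := os.ReadFile("root.crt") // hypothetical file
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM data found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}

	fmt.Println("self-signed:", cert.Issuer.String() == cert.Subject.String())
	fmt.Println("certificate authority:", cert.BasicConstraintsValid && cert.IsCA)
	fmt.Println("subject key identifier present:", len(cert.SubjectKeyId) > 0) // RFC 5280, 4.2.1.2
	fmt.Println("may sign certificates:", cert.KeyUsage&x509.KeyUsageCertSign != 0)
}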
+} + +func TestRootIsInvalid(t *testing.T) { + t.Run("NoCertificate", func(t *testing.T) { + assert.Assert(t, !RootIsValid(nil)) + assert.Assert(t, !RootIsValid(&RootCertificateAuthority{})) + + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) + + root.Certificate = Certificate{} + assert.Assert(t, !RootIsValid(root)) + }) + + t.Run("NoPrivateKey", func(t *testing.T) { + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) + + root.PrivateKey = PrivateKey{} + assert.Assert(t, !RootIsValid(root)) + }) + + t.Run("WrongPrivateKey", func(t *testing.T) { + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) + + other, err := NewRootCertificateAuthority() + assert.NilError(t, err) + + root.PrivateKey = other.PrivateKey + assert.Assert(t, !RootIsValid(root)) + }) + + t.Run("NotAuthority", func(t *testing.T) { + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) + + leaf, err := root.GenerateLeafCertificate("", nil) + assert.NilError(t, err) + + assert.Assert(t, !RootIsValid((*RootCertificateAuthority)(leaf))) + }) + + t.Run("TooEarly", func(t *testing.T) { + original := currentTime + t.Cleanup(func() { currentTime = original }) + + currentTime = func() time.Time { + return time.Now().Add(time.Hour * 24) // tomorrow + } + + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) + + assert.Assert(t, !RootIsValid(root)) + }) + + t.Run("Expired", func(t *testing.T) { + original := currentTime + t.Cleanup(func() { currentTime = original }) + + currentTime = func() time.Time { + return time.Date(2010, time.January, 1, 0, 0, 0, 0, time.Local) + } + + root, err := NewRootCertificateAuthority() assert.NilError(t, err) - t.Logf("using %q:\n%s", openssl, output) + + assert.Assert(t, !RootIsValid(root)) + }) +} + +func TestLeafCertificate(t *testing.T) { + serials := StringSet{} + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) + + for _, tt := range []struct { + test string + commonName string + dnsNames []string + }{ + { + test: "OnlyCommonName", commonName: "some-cn", + }, + { + test: "OnlyDNSNames", dnsNames: []string{"local-name", "sub.domain"}, + }, + } { + t.Run(tt.test, func(t *testing.T) { + leaf, err := root.GenerateLeafCertificate(tt.commonName, tt.dnsNames) + assert.NilError(t, err) + assert.Assert(t, leaf != nil) + + cert := leaf.Certificate.x509 + assert.Assert(t, root.leafIsValid(leaf), "got %#v", cert) + + number := cert.SerialNumber.String() + assert.Assert(t, !serials.Has(number)) + serials.Insert(number) + + assert.Equal(t, cert.Issuer.CommonName, "postgres-operator-ca") + assert.Assert(t, cert.BasicConstraintsValid && !cert.IsCA) + assert.Assert(t, time.Now().After(cert.NotBefore), "early, got %v", cert.NotBefore) + assert.Assert(t, time.Now().Before(cert.NotAfter), "expired, got %v", cert.NotAfter) + + assert.Equal(t, cert.PublicKeyAlgorithm, x509.ECDSA) + assert.Equal(t, cert.SignatureAlgorithm, x509.ECDSAWithSHA384) + assert.Equal(t, cert.KeyUsage, x509.KeyUsageDigitalSignature|x509.KeyUsageKeyEncipherment) + + assert.Equal(t, cert.Subject.CommonName, tt.commonName) + assert.DeepEqual(t, cert.DNSNames, tt.dnsNames) + assert.Assert(t, cert.EmailAddresses == nil) + assert.Assert(t, cert.IPAddresses == nil) + assert.Assert(t, cert.URIs == nil) + + // CAs must include the Authority Key Identifier on new certificates. + // The "crypto/x509" package adds it automatically since Go 1.15. 
+ // - https://tools.ietf.org/html/rfc5280#section-4.2.1.1 + // - https://go.dev/doc/go1.15#crypto/x509 + assert.DeepEqual(t, + leaf.Certificate.x509.AuthorityKeyId, + root.Certificate.x509.SubjectKeyId) + + // CAs must include their entire Subject on new certificates. + // - https://tools.ietf.org/html/rfc5280#section-4.1.2.6 + assert.DeepEqual(t, + leaf.Certificate.x509.Issuer, + root.Certificate.x509.Subject) + + t.Run("OpenSSLVerify", func(t *testing.T) { + openssl := require.OpenSSL(t) + + t.Run("Basic", func(t *testing.T) { + basicOpenSSLVerify(t, openssl, root.Certificate, leaf.Certificate) + }) + + t.Run("Strict", func(t *testing.T) { + strictOpenSSLVerify(t, openssl, root.Certificate, leaf.Certificate) + }) + }) + + t.Run("Subject", func(t *testing.T) { + assert.Equal(t, + leaf.Certificate.CommonName(), tt.commonName) + assert.DeepEqual(t, + leaf.Certificate.DNSNames(), tt.dnsNames) + assert.Assert(t, + leaf.Certificate.hasSubject(tt.commonName, tt.dnsNames)) + + for _, other := range []struct { + test string + commonName string + dnsNames []string + }{ + { + test: "DifferentCommonName", + commonName: "other", + dnsNames: tt.dnsNames, + }, + { + test: "DifferentDNSNames", + commonName: tt.commonName, + dnsNames: []string{"other"}, + }, + { + test: "DNSNameSubset", + commonName: tt.commonName, + dnsNames: []string{"local-name"}, + }, + } { + assert.Assert(t, + !leaf.Certificate.hasSubject(other.commonName, other.dnsNames)) + } + }) + }) } +} + +func TestLeafIsInvalid(t *testing.T) { + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) + + t.Run("ZeroRoot", func(t *testing.T) { + zero := RootCertificateAuthority{} + assert.Assert(t, !zero.leafIsValid(nil)) + + leaf, err := root.GenerateLeafCertificate("", nil) + assert.NilError(t, err) + + assert.Assert(t, !zero.leafIsValid(leaf)) + }) + + t.Run("NoCertificate", func(t *testing.T) { + assert.Assert(t, !root.leafIsValid(nil)) + assert.Assert(t, !root.leafIsValid(&LeafCertificate{})) + + leaf, err := root.GenerateLeafCertificate("", nil) + assert.NilError(t, err) + + leaf.Certificate = Certificate{} + assert.Assert(t, !root.leafIsValid(leaf)) + }) + + t.Run("NoPrivateKey", func(t *testing.T) { + leaf, err := root.GenerateLeafCertificate("", nil) + assert.NilError(t, err) + + leaf.PrivateKey = PrivateKey{} + assert.Assert(t, !root.leafIsValid(leaf)) + }) + + t.Run("WrongPrivateKey", func(t *testing.T) { + leaf, err := root.GenerateLeafCertificate("", nil) + assert.NilError(t, err) + + other, err := root.GenerateLeafCertificate("", nil) + assert.NilError(t, err) + + leaf.PrivateKey = other.PrivateKey + assert.Assert(t, !root.leafIsValid(leaf)) + }) + + t.Run("IsAuthority", func(t *testing.T) { + assert.Assert(t, !root.leafIsValid((*LeafCertificate)(root))) + }) + + t.Run("TooEarly", func(t *testing.T) { + original := currentTime + t.Cleanup(func() { currentTime = original }) + + currentTime = func() time.Time { + return time.Now().Add(time.Hour * 24) // tomorrow + } - rootCA := NewRootCertificateAuthority() - assert.NilError(t, rootCA.Generate()) + leaf, err := root.GenerateLeafCertificate("", nil) + assert.NilError(t, err) - namespace := "pgo-test" - commonName := "hippo." 
+ namespace - dnsNames := []string{commonName} - leaf := NewLeafCertificate(commonName, dnsNames, []net.IP{}) - assert.NilError(t, leaf.Generate(rootCA)) + assert.Assert(t, !root.leafIsValid(leaf)) + }) + + t.Run("PastRenewalTime", func(t *testing.T) { + // Generate a cert with the default valid times, + // e.g., 1 hour before now until 1 year from now + leaf, err := root.GenerateLeafCertificate("", nil) + assert.NilError(t, err) - basicOpenSSLVerify(t, openssl, - rootCA.Certificate, leaf.Certificate) + // set the time now to be over 2/3rds of a year for checking + original := currentTime + t.Cleanup(func() { currentTime = original }) - t.Run("strict", func(t *testing.T) { - output, _ := exec.Command(openssl, "verify", "-help").CombinedOutput() - if !strings.Contains(string(output), "-x509_strict") { - t.Skip(`requires "-x509_strict" flag`) + currentTime = func() time.Time { + return time.Now().Add(time.Hour * 24 * 330) } - strictOpenSSLVerify(t, openssl, rootCA.Certificate, leaf.Certificate) + assert.Assert(t, !root.leafIsValid(leaf)) }) + + t.Run("Expired", func(t *testing.T) { + original := currentTime + t.Cleanup(func() { currentTime = original }) + + currentTime = func() time.Time { + return time.Date(2010, time.January, 1, 0, 0, 0, 0, time.Local) + } + + leaf, err := root.GenerateLeafCertificate("", nil) + assert.NilError(t, err) + + assert.Assert(t, !root.leafIsValid(leaf)) + }) +} + +func TestIsBeforeRenewalTime(t *testing.T) { + oneHourAgo := time.Now().Add(-1 * time.Hour) + twoHoursInTheFuture := time.Now().Add(2 * time.Hour) + + assert.Assert(t, isBeforeRenewalTime(oneHourAgo, twoHoursInTheFuture)) + + sixHoursAgo := time.Now().Add(-6 * time.Hour) + assert.Assert(t, !isBeforeRenewalTime(sixHoursAgo, twoHoursInTheFuture)) } -func basicOpenSSLVerify(t *testing.T, openssl string, root, leaf *Certificate) { +func TestRegenerateLeaf(t *testing.T) { + root, err := NewRootCertificateAuthority() + assert.NilError(t, err) + + before, err := root.GenerateLeafCertificate("before", nil) + assert.NilError(t, err) + + // Leaf is the same when the subject is the same. + same, err := root.RegenerateLeafWhenNecessary(before, "before", nil) + assert.NilError(t, err) + assert.DeepEqual(t, same, before) + + after, err := root.RegenerateLeafWhenNecessary(before, "after", nil) + assert.NilError(t, err) + assert.DeepEqual(t, same, before) // Argument does not change. + + assert.Assert(t, after.Certificate.hasSubject("after", nil)) + assert.Assert(t, !after.Certificate.Equal(before.Certificate)) +} + +func basicOpenSSLVerify(t *testing.T, openssl string, root, leaf Certificate) { verify := func(t testing.TB, args ...string) { t.Helper() - args = append([]string{"verify"}, args...) + // #nosec G204 -- args from this test + cmd := exec.Command(openssl, append([]string{"verify"}, args...)...) - output, err := exec.Command(openssl, args...).CombinedOutput() - assert.NilError(t, err, "%q\n%s", append([]string{openssl}, args...), output) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) } dir := t.TempDir() @@ -112,7 +450,7 @@ func basicOpenSSLVerify(t *testing.T, openssl string, root, leaf *Certificate) { rootFile := filepath.Join(dir, "root.crt") rootBytes, err := root.MarshalText() assert.NilError(t, err) - assert.NilError(t, ioutil.WriteFile(rootFile, rootBytes, 0600)) + assert.NilError(t, os.WriteFile(rootFile, rootBytes, 0o600)) // The root certificate cannot be verified independently because it is self-signed. // It is checked below by being the specified CA. 
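// The removed comment above describes how "openssl verify -CAfile" implicitly
// trusts everything bundled into the CA file. Go's crypto/x509 keeps that
// distinction explicit: trusted roots go in one pool and untrusted intermediates
// in another, so an intermediate can complete the chain without becoming a trust
// anchor. A sketch only; this package currently issues no intermediates.
package sketch

import "crypto/x509"

func verifyLeaf(leaf, intermediate, root *x509.Certificate) error {
	roots := x509.NewCertPool()
	roots.AddCert(root) // explicitly trusted

	intermediates := x509.NewCertPool()
	intermediates.AddCert(intermediate) // used for chain building, never trusted on its own

	_, err := leaf.Verify(x509.VerifyOptions{
		Roots:         roots,
		Intermediates: intermediates,
	})
	return err
}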
@@ -120,47 +458,44 @@ func basicOpenSSLVerify(t *testing.T, openssl string, root, leaf *Certificate) { leafFile := filepath.Join(dir, "leaf.crt") leafBytes, err := leaf.MarshalText() assert.NilError(t, err) - assert.NilError(t, ioutil.WriteFile(leafFile, leafBytes, 0600)) - - // Older versions of OpenSSL have fewer options for verifying certificates. - // When the only flag available is "-CAfile", CAs must be bundled - // there and are *implicitly trusted*. - // - // This brings a few considerations to be made when it comes to proper verification - // of the leaf certificate. Root certificates are self-signed and must be trusted. - // However, trusted certificate keys must be handled carefully so that they don't - // sign something untrustworthy. Intermediates provide a way to automate trust without - // exposing the root key. To accomplish this, intermediates are bundled with leaf - // certificates and usually sent together as the certificate chain during TLS handshake. - // However, as discussed here: - // https://mail.python.org/pipermail/cryptography-dev/2016-August/000676.html - // OpenSSL will stop verifying the certificate chain as soon as a root certificate is - // encountered, as intended. However, OpenSSL will do the same thing when dealing with a - // self-signed Intermediate.pem, which it treats as a root certificate. In that case, any - // following root PEM files will not be considered. Because of this, it is essential to - // ensure that any Intermediate.pem in the chain is from a trusted source before relying - // on the verification method given below. - - bundleFile := filepath.Join(dir, "ca-chain.crt") - assert.NilError(t, ioutil.WriteFile(bundleFile, rootBytes, 0600)) - - verify(t, "-CAfile", bundleFile, leafFile) - verify(t, "-CAfile", bundleFile, "-purpose", "sslclient", leafFile) - verify(t, "-CAfile", bundleFile, "-purpose", "sslserver", leafFile) + assert.NilError(t, os.WriteFile(leafFile, leafBytes, 0o600)) + + // Older versions of the "openssl verify" command cannot properly verify + // a certificate chain that contains intermediates. When the only flag + // available is "-CAfile", intermediates must be bundled there and are + // *implicitly trusted*. The [strictOpenSSLVerify] function is able to + // verify the chain properly. + // - https://mail.python.org/pipermail/cryptography-dev/2016-August/000676.html + + // TODO(cbandy): When we generate intermediate certificates, verify them + // independently then bundle them with the root to verify the leaf. + + verify(t, "-CAfile", rootFile, leafFile) + verify(t, "-CAfile", rootFile, "-purpose", "sslclient", leafFile) + verify(t, "-CAfile", rootFile, "-purpose", "sslserver", leafFile) } -func strictOpenSSLVerify(t *testing.T, openssl string, root, leaf *Certificate) { +func strictOpenSSLVerify(t *testing.T, openssl string, root, leaf Certificate) { + output, _ := exec.Command(openssl, "verify", "-help").CombinedOutput() + if !strings.Contains(string(output), "-x509_strict") { + t.Skip(`requires "-x509_strict" flag`) + } + if !strings.Contains(string(output), "-no-CAfile") { + t.Skip(`requires a flag to ignore system certificates`) + } + verify := func(t testing.TB, args ...string) { t.Helper() - args = append([]string{"verify", + // #nosec G204 -- args from this test + cmd := exec.Command(openssl, append([]string{"verify", // Do not use the default trusted CAs. "-no-CAfile", "-no-CApath", // Disable "non-compliant workarounds for broken certificates". "-x509_strict", - }, args...) + }, args...)...) 
- output, err := exec.Command(openssl, args...).CombinedOutput() - assert.NilError(t, err, "%q\n%s", append([]string{openssl}, args...), output) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) } dir := t.TempDir() @@ -168,7 +503,7 @@ func strictOpenSSLVerify(t *testing.T, openssl string, root, leaf *Certificate) rootFile := filepath.Join(dir, "root.crt") rootBytes, err := root.MarshalText() assert.NilError(t, err) - assert.NilError(t, ioutil.WriteFile(rootFile, rootBytes, 0600)) + assert.NilError(t, os.WriteFile(rootFile, rootBytes, 0o600)) // The root certificate cannot be verified independently because it is self-signed. // Some checks are performed when it is a "trusted" certificate below. @@ -176,7 +511,10 @@ func strictOpenSSLVerify(t *testing.T, openssl string, root, leaf *Certificate) leafFile := filepath.Join(dir, "leaf.crt") leafBytes, err := leaf.MarshalText() assert.NilError(t, err) - assert.NilError(t, ioutil.WriteFile(leafFile, leafBytes, 0600)) + assert.NilError(t, os.WriteFile(leafFile, leafBytes, 0o600)) + + // TODO(cbandy): When we generate intermediate certificates, verify them + // independently then pass them via "-untrusted" to verify the leaf. verify(t, "-trusted", rootFile, leafFile) verify(t, "-trusted", rootFile, "-purpose", "sslclient", leafFile) diff --git a/internal/pki/root.go b/internal/pki/root.go deleted file mode 100644 index 70b1851cd8..0000000000 --- a/internal/pki/root.go +++ /dev/null @@ -1,184 +0,0 @@ -// +build go1.15 - -package pki - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -import ( - "crypto/ecdsa" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "math/big" - "time" -) - -const ( - // defaultRootCAExpiration sets the default time for the root CA, which is - // placed far enough into the future - defaultRootCAExpiration = 10 * 365 * 24 * time.Hour - - // rootCAName is the name of the root CA - rootCAName = "postgres-operator-ca" -) - -// RootCertificateAuthority contains the ability to generate the necessary -// components of a root certificate authority (root CA). This includes the -// private key for the root CA as well as its certificate, which is self-signed -// (as is the nature of a root CA). -// -// In the context of the Operator, there will be one root certificate per -// namespace that contains postgresclusters managed by the Operator. 
-type RootCertificateAuthority struct { - // Certificate is the certificate of this certificate authority - Certificate *Certificate - - // PrivateKey is the private key portion of the certificate authority - PrivateKey *PrivateKey - - // generateKey generates an ECDSA keypair - generateKey func() (*ecdsa.PrivateKey, error) - - // generateCertificate generates a X509 certificate return in DER format - generateCertificate func(*ecdsa.PrivateKey, *big.Int) ([]byte, error) - - // generateSerialNumber creates a unique serial number to assign to the - // certificate - generateSerialNumber func() (*big.Int, error) -} - -// Generate creates a new root certificate authority -func (ca *RootCertificateAuthority) Generate() error { - // ensure functions are defined - if ca.generateKey == nil || ca.generateCertificate == nil || ca.generateSerialNumber == nil { - return ErrFunctionNotImplemented - } - - // generate a private key - if privateKey, err := ca.generateKey(); err != nil { - return err - } else { - ca.PrivateKey = NewPrivateKey(privateKey) - } - - // generate a serial number - serialNumber, err := ca.generateSerialNumber() - - if err != nil { - return err - } - - // generate a certificate - if certificate, err := ca.generateCertificate(ca.PrivateKey.PrivateKey, serialNumber); err != nil { - return err - } else { - ca.Certificate = &Certificate{Certificate: certificate} - } - - return nil -} - -// NewRootCertificateAuthority generates a new root certificate authority -// that can be used to issue leaf certificates -func NewRootCertificateAuthority() *RootCertificateAuthority { - return &RootCertificateAuthority{ - generateCertificate: generateRootCertificate, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } -} - -// ParseRootCertificateAuthority takes a PEM encoded private key and certificate -// representation and attempts to parse it. -func ParseRootCertificateAuthority(privateKey, certificate []byte) (*RootCertificateAuthority, error) { - var err error - ca := NewRootCertificateAuthority() - - // attempt to parse the private key - if ca.PrivateKey, err = ParsePrivateKey(privateKey); err != nil { - return nil, err - } - - // attempt to parse the certificate - if ca.Certificate, err = ParseCertificate(certificate); err != nil { - return nil, err - } - - return ca, nil -} - -// RootCAIsBad checks that at least one root CA has been generated and that -// all returned certs are CAs and not expired -// -// TODO(tjmoore4): Currently this will return 'true' if any of the parsed certs -// fail a given check. For scenarios where multiple certs may be returned, such -// as in a BYOC/BYOCA, this will need to be handled so we only generate a new -// certificate for our cert if it is the one that fails. 
-func RootCAIsBad(root *RootCertificateAuthority) bool { - // if the certificate or the private key are nil, the root CA is bad - if root.Certificate == nil || root.PrivateKey == nil { - return true - } - - // if there is an error parsing the root certificate or if there is not at least one certificate, - // the RootCertificateAuthority is bad - rootCerts, rootErr := x509.ParseCertificates(root.Certificate.Certificate) - - if rootErr != nil && len(rootCerts) < 1 { - return true - } - - // find our root cert in the returned slice - for _, cert := range rootCerts { - // root cert is bad if it is not a CA - if !cert.IsCA || !cert.BasicConstraintsValid { - return true - } - - // if it is outside of the certs configured valid time range - if time.Now().After(cert.NotAfter) || time.Now().Before(cert.NotBefore) { - return true - } - } - - // checks passed, cert is good - return false - -} - -// generateRootCertificate creates a x509 certificate with a ECDSA signature using -// the SHA-384 algorithm -func generateRootCertificate(privateKey *ecdsa.PrivateKey, serialNumber *big.Int) ([]byte, error) { - // prepare the certificate. set the validity time to the predefined range - now := time.Now() - template := &x509.Certificate{ - BasicConstraintsValid: true, - IsCA: true, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, - MaxPathLenZero: true, // there are no intermediate certificates - NotBefore: now.Add(beforeInterval), - NotAfter: now.Add(defaultRootCAExpiration), - SerialNumber: serialNumber, - SignatureAlgorithm: certificateSignatureAlgorithm, - Subject: pkix.Name{ - CommonName: rootCAName, - }, - } - - // a root certificate has no parent, so pass in the template twice - return x509.CreateCertificate(rand.Reader, template, template, - privateKey.Public(), privateKey) -} diff --git a/internal/pki/root_test.go b/internal/pki/root_test.go deleted file mode 100644 index c9bc2c495b..0000000000 --- a/internal/pki/root_test.go +++ /dev/null @@ -1,379 +0,0 @@ -package pki - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -import ( - "crypto/ecdsa" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "errors" - "math/big" - "reflect" - "testing" - "time" - - "gotest.tools/v3/assert" -) - -func TestNewRootCertificateAuthority(t *testing.T) { - ca := NewRootCertificateAuthority() - - if ca.generateCertificate == nil { - t.Fatalf("expected generateCertificate to be set, got nil") - } - - if ca.generateKey == nil { - t.Fatalf("expected generateKey to be set, got nil") - } - - if ca.generateSerialNumber == nil { - t.Fatalf("expected generateSerialNumber to be set, got nil") - } - - // run generate to ensure it sets valid values - if err := ca.Generate(); err != nil { - t.Fatalf("expected generate to return no errors, got: %s", err.Error()) - } - - // ensure private key and certificate are set - if ca.PrivateKey == nil { - t.Fatalf("expected private key to be set") - } - - if ca.Certificate == nil { - t.Fatalf("expected certificate to be set") - } -} - -func TestParseRootCertificateAuthority(t *testing.T) { - generateRootCertificateAuthority := func() *RootCertificateAuthority { - ca := NewRootCertificateAuthority() - _ = ca.Generate() - return ca - } - - marshalCertificate := func(ca *RootCertificateAuthority) []byte { - data, _ := ca.Certificate.MarshalText() - return data - } - - marshalPrivateKey := func(ca *RootCertificateAuthority) []byte { - data, _ := ca.PrivateKey.MarshalText() - return data - } - - ca := generateRootCertificateAuthority() - - t.Run("valid plaintext", func(t *testing.T) { - privateKey := marshalPrivateKey(ca) - certificate := marshalCertificate(ca) - - rootCA, err := ParseRootCertificateAuthority(privateKey, certificate) - - if err != nil { - t.Fatalf("expected no error, actual %s", err.Error()) - } - - if !reflect.DeepEqual(ca.PrivateKey.PrivateKey, rootCA.PrivateKey.PrivateKey) { - t.Fatalf("expected private keys to match") - } - - if !reflect.DeepEqual(ca.Certificate.Certificate, rootCA.Certificate.Certificate) { - t.Fatalf("expected certificates to match") - } - }) - - t.Run("invalid", func(t *testing.T) { - t.Run("bad private key", func(t *testing.T) { - privateKey := []byte("bad") - certificate := marshalCertificate(ca) - - rootCA, err := ParseRootCertificateAuthority(privateKey, certificate) - - if err == nil { - t.Fatalf("expected error") - } - - if rootCA != nil { - t.Fatalf("expected CA to be nil") - } - }) - - t.Run("bad certificate key", func(t *testing.T) { - privateKey := marshalPrivateKey(ca) - certificate := []byte("bad") - - rootCA, err := ParseRootCertificateAuthority(privateKey, certificate) - - if err == nil { - t.Fatalf("expected error") - } - - if rootCA != nil { - t.Fatalf("expected CA to be nil") - } - }) - }) -} - -func TestRootCertificateAuthority(t *testing.T) { - t.Run("Generate", func(t *testing.T) { - t.Run("valid", func(t *testing.T) { - ca := &RootCertificateAuthority{} - ca.generateCertificate = generateRootCertificate - ca.generateKey = generateKey - ca.generateSerialNumber = generateSerialNumber - - // run generate to ensure it sets valid values - if err := ca.Generate(); err != nil { - t.Fatalf("expected generate to return no errors, got: %s", err.Error()) - } - - // ensure private key and certificate are set - if ca.PrivateKey == nil { - t.Fatalf("expected private key to be set") - } - - if ca.Certificate == nil { - t.Fatalf("expected certificate to be set") - } - - if ca.PrivateKey.PrivateKey == nil { - t.Fatalf("expected private key to be set, got nil") - } - - if len(ca.Certificate.Certificate) == 0 { - t.Fatalf("expected certificate to be 
generated") - } - - // see if certificate can be parsed - x509Certificate, err := x509.ParseCertificate(ca.Certificate.Certificate) - - if err != nil { - t.Fatalf("expected valid x509 ceriticate, actual %s", err.Error()) - } - - if !ca.PrivateKey.PrivateKey.PublicKey.Equal(x509Certificate.PublicKey) { - t.Fatalf("expected public keys to match") - } - - // check certain attributes - if !x509Certificate.IsCA { - t.Fatalf("expected certificate to be CA") - } - - if x509Certificate.MaxPathLenZero == false { - t.Fatalf("expected MaxPathLenZero to be set to 'true', actual %t", x509Certificate.MaxPathLenZero) - } - - if x509Certificate.Subject.CommonName != rootCAName { - t.Fatalf("expected subject name to be %s, actual %s", defaultRootCAExpiration, x509Certificate.Subject.CommonName) - } - - // ensure private key functions are set - assertConstructed(t, ca.PrivateKey) - }) - - t.Run("invalid", func(t *testing.T) { - t.Run("generate certificate not set", func(t *testing.T) { - ca := &RootCertificateAuthority{} - ca.generateCertificate = nil - ca.generateKey = generateKey - ca.generateSerialNumber = generateSerialNumber - - if err := ca.Generate(); !errors.Is(err, ErrFunctionNotImplemented) { - t.Fatalf("expected function not implemented error") - } - }) - - t.Run("generate key not set", func(t *testing.T) { - ca := &RootCertificateAuthority{} - ca.generateCertificate = generateRootCertificate - ca.generateKey = nil - ca.generateSerialNumber = generateSerialNumber - - if err := ca.Generate(); !errors.Is(err, ErrFunctionNotImplemented) { - t.Fatalf("expected function not implemented error") - } - }) - - t.Run("generate serial number not set", func(t *testing.T) { - ca := &RootCertificateAuthority{} - ca.generateCertificate = generateRootCertificate - ca.generateKey = generateKey - ca.generateSerialNumber = nil - - if err := ca.Generate(); !errors.Is(err, ErrFunctionNotImplemented) { - t.Fatalf("expected function not implemented error") - } - }) - - t.Run("cannot generate private key", func(t *testing.T) { - msg := "cannot generate private key" - ca := &RootCertificateAuthority{} - ca.generateCertificate = generateRootCertificate - ca.generateKey = func() (*ecdsa.PrivateKey, error) { return nil, errors.New(msg) } - ca.generateSerialNumber = generateSerialNumber - - if err := ca.Generate(); err.Error() != msg { - t.Fatalf("expected error: %s", msg) - } - }) - - t.Run("cannot generate serial number", func(t *testing.T) { - msg := "cannot generate serial number" - ca := &RootCertificateAuthority{} - ca.generateCertificate = generateRootCertificate - ca.generateKey = generateKey - ca.generateSerialNumber = func() (*big.Int, error) { return nil, errors.New(msg) } - - if err := ca.Generate(); err.Error() != msg { - t.Fatalf("expected error: %s", msg) - } - }) - - t.Run("cannot generate certificate", func(t *testing.T) { - msg := "cannot generate certificate" - ca := &RootCertificateAuthority{} - ca.generateCertificate = func(*ecdsa.PrivateKey, *big.Int) ([]byte, error) { return nil, errors.New(msg) } - ca.generateKey = generateKey - ca.generateSerialNumber = generateSerialNumber - - if err := ca.Generate(); err.Error() != msg { - t.Fatalf("expected error: %s", msg) - } - }) - }) - }) -} - -func TestRootCAIsBad(t *testing.T) { - rootCA, err := newTestRoot() - assert.NilError(t, err) - - t.Run("root cert is good", func(t *testing.T) { - - assert.Assert(t, !RootCAIsBad(rootCA)) - }) - - t.Run("root cert is empty", func(t *testing.T) { - - emptyRoot := &RootCertificateAuthority{} - assert.Assert(t, 
RootCAIsBad(emptyRoot)) - }) - - t.Run("error parsing certificate", func(t *testing.T) { - rootCA.Certificate = &Certificate{ - Certificate: []byte("notacert"), - } - - assert.Assert(t, RootCAIsBad(rootCA)) - }) - - t.Run("error is not a CA", func(t *testing.T) { - - badCa := &RootCertificateAuthority{ - generateCertificate: generateRootCertificateBadCA, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } - - // run generate to ensure it sets valid values - if err := badCa.Generate(); err != nil { - t.Fatalf("expected generate to return no errors, got: %s", err.Error()) - } - - assert.Assert(t, RootCAIsBad(badCa)) - - }) - - t.Run("error expired", func(t *testing.T) { - - badCa := &RootCertificateAuthority{ - generateCertificate: generateRootCertificateExpired, - generateKey: generateKey, - generateSerialNumber: generateSerialNumber, - } - - // run generate to ensure it sets valid values - if err := badCa.Generate(); err != nil { - t.Fatalf("expected generate to return no errors, got: %s", err.Error()) - } - - assert.Assert(t, RootCAIsBad(badCa)) - - }) -} - -// generateRootCertificateBadCA creates a root certificate that is not -// configured as a CA -func generateRootCertificateBadCA(privateKey *ecdsa.PrivateKey, serialNumber *big.Int) ([]byte, error) { - // prepare the certificate. set the validity time to the predefined range - now := time.Now() - template := &x509.Certificate{ - BasicConstraintsValid: true, - IsCA: false, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, - NotBefore: now.Add(beforeInterval), - NotAfter: now.Add(defaultRootCAExpiration), - SerialNumber: serialNumber, - SignatureAlgorithm: certificateSignatureAlgorithm, - Subject: pkix.Name{ - CommonName: rootCAName, - }, - } - - // a root certificate has no parent, so pass in the template twice - return x509.CreateCertificate(rand.Reader, template, template, - privateKey.Public(), privateKey) -} - -// generateRootCertificateExpired creates a root certificate that is already expired -func generateRootCertificateExpired(privateKey *ecdsa.PrivateKey, serialNumber *big.Int) ([]byte, error) { - // prepare the certificate. set the validity time to the predefined range - now := time.Now() - template := &x509.Certificate{ - BasicConstraintsValid: true, - IsCA: true, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, - MaxPathLenZero: true, // there are no intermediate certificates - NotBefore: now.Add(beforeInterval), - NotAfter: now.Add(beforeInterval), // not after an hour ago, i.e. expired - SerialNumber: serialNumber, - SignatureAlgorithm: certificateSignatureAlgorithm, - Subject: pkix.Name{ - CommonName: rootCAName, - }, - } - - // a root certificate has no parent, so pass in the template twice - return x509.CreateCertificate(rand.Reader, template, template, - privateKey.Public(), privateKey) -} - -// newTestRoot creates a new test root certificate -func newTestRoot() (*RootCertificateAuthority, error) { - testRoot := &RootCertificateAuthority{} - testRoot.generateCertificate = generateRootCertificate - testRoot.generateKey = generateKey - testRoot.generateSerialNumber = generateSerialNumber - - // run generate to ensure it sets valid values - err := testRoot.Generate() - - return testRoot, err -} diff --git a/internal/postgis/postgis.go b/internal/postgis/postgis.go new file mode 100644 index 0000000000..f54da0dd93 --- /dev/null +++ b/internal/postgis/postgis.go @@ -0,0 +1,42 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package postgis + +import ( + "context" + "strings" + + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/postgres" +) + +// EnableInPostgreSQL installs triggers for the following extensions into every database: +// - postgis +// - postgis_topology +// - fuzzystrmatch +// - postgis_tiger_geocoder +func EnableInPostgreSQL(ctx context.Context, exec postgres.Executor) error { + log := logging.FromContext(ctx) + + stdout, stderr, err := exec.ExecInAllDatabases(ctx, + strings.Join([]string{ + // Quiet NOTICE messages from IF NOT EXISTS statements. + // - https://www.postgresql.org/docs/current/runtime-config-client.html + `SET client_min_messages = WARNING;`, + + `CREATE EXTENSION IF NOT EXISTS postgis;`, + `CREATE EXTENSION IF NOT EXISTS postgis_topology;`, + `CREATE EXTENSION IF NOT EXISTS fuzzystrmatch;`, + `CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;`, + }, "\n"), + map[string]string{ + "ON_ERROR_STOP": "on", // Abort when any one statement fails. + "QUIET": "on", // Do not print successful statements to stdout. + }) + + log.V(1).Info("enabled PostGIS and related extensions", "stdout", stdout, "stderr", stderr) + + return err +} diff --git a/internal/postgis/postgis_test.go b/internal/postgis/postgis_test.go new file mode 100644 index 0000000000..5f604abc90 --- /dev/null +++ b/internal/postgis/postgis_test.go @@ -0,0 +1,42 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package postgis + +import ( + "context" + "errors" + "io" + "strings" + "testing" + + "gotest.tools/v3/assert" +) + +func TestEnableInPostgreSQL(t *testing.T) { + expected := errors.New("whoops") + exec := func( + _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + assert.Assert(t, stdout != nil, "should capture stdout") + assert.Assert(t, stderr != nil, "should capture stderr") + + assert.Assert(t, strings.Contains(strings.Join(command, "\n"), + `SELECT datname FROM pg_catalog.pg_database`, + ), "expected all databases and templates") + + b, err := io.ReadAll(stdin) + assert.NilError(t, err) + assert.Equal(t, string(b), `SET client_min_messages = WARNING; +CREATE EXTENSION IF NOT EXISTS postgis; +CREATE EXTENSION IF NOT EXISTS postgis_topology; +CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; +CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;`) + + return expected + } + + ctx := context.Background() + assert.Equal(t, expected, EnableInPostgreSQL(ctx, exec)) +} diff --git a/internal/postgres/assertions_test.go b/internal/postgres/assertions_test.go deleted file mode 100644 index e1f20bc536..0000000000 --- a/internal/postgres/assertions_test.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
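// EnableInPostgreSQL above only needs something that satisfies postgres.Executor,
// and the test shows that a bare function with the matching signature is enough.
// The sketch below wires it to a local command runner instead of a pod exec
// transport. It is illustrative only: it assumes the command slice is argv-style
// and that the file lives inside this module, since these are internal packages.
package main

import (
	"context"
	"io"
	"os/exec"

	"github.com/crunchydata/postgres-operator/internal/postgis"
	"github.com/crunchydata/postgres-operator/internal/postgres"
)

func main() {
	runLocally := postgres.Executor(func(
		ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string,
	) error {
		// ExecInAllDatabases assembles the command; this executor merely runs it.
		cmd := exec.CommandContext(ctx, command[0], command[1:]...)
		cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr
		return cmd.Run()
	})

	if err := postgis.EnableInPostgreSQL(context.Background(), runLocally); err != nil {
		panic(err)
	}
}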
-*/ - -package postgres - -import ( - "strings" - - "gotest.tools/v3/assert/cmp" - "sigs.k8s.io/yaml" -) - -func marshalMatches(actual interface{}, expected string) cmp.Comparison { - b, err := yaml.Marshal(actual) - if err != nil { - return func() cmp.Result { return cmp.ResultFromError(err) } - } - return cmp.DeepEqual(string(b), strings.Trim(expected, "\t\n")+"\n") -} diff --git a/internal/postgres/config.go b/internal/postgres/config.go index 748e0ad9c1..ce1acde3fb 100644 --- a/internal/postgres/config.go +++ b/internal/postgres/config.go @@ -1,37 +1,51 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres import ( + "context" "fmt" "strings" corev1 "k8s.io/api/core/v1" + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) const ( + // bashHalt is a Bash function that prints its arguments to stderr then + // exits with a non-zero status. It uses the exit status of the prior + // command if that was not zero. + bashHalt = `halt() { local rc=$?; >&2 echo "$@"; exit "${rc/#0/1}"; }` + + // bashPermissions is a Bash function that prints the permissions of a file + // or directory and all its parent directories, except the root directory. + bashPermissions = `permissions() {` + + ` while [[ -n "$1" ]]; do set "${1%/*}" "$@"; done; shift;` + + ` stat -Lc '%A %4u %4g %n' "$@";` + + ` }` + + // bashRecreateDirectory is a Bash function that moves the contents of an + // existing directory into a newly created directory of the same name. + bashRecreateDirectory = ` +recreate() ( + local tmp; tmp=$(mktemp -d -p "${1%/*}"); GLOBIGNORE='.:..'; set -x + chmod "$2" "${tmp}"; mv "$1"/* "${tmp}"; rmdir "$1"; mv "${tmp}" "$1" +) +` + // bashSafeLink is a Bash function that moves an existing file or directory // and replaces it with a symbolic link. bashSafeLink = ` safelink() ( local desired="$1" name="$2" current current=$(realpath "${name}") - if [ "${current}" = "${desired}" ]; then return; fi + if [[ "${current}" == "${desired}" ]]; then return; fi set -x; mv --no-target-directory "${current}" "${desired}" ln --no-dereference --force --symbolic "${desired}" "${name}" ) @@ -40,15 +54,24 @@ safelink() ( // dataMountPath is where to mount the main data volume. dataMountPath = "/pgdata" + // tablespaceMountPath is where to mount the tablespace volumes. + tablespaceMountPath = "/tablespaces" + // walMountPath is where to mount the optional WAL volume. walMountPath = "/pgwal" + // downwardAPIPath is where to mount the downwardAPI volume. + downwardAPIPath = "/etc/database-containerinfo" + // SocketDirectory is where to bind and connect to UNIX sockets.
SocketDirectory = "/tmp/postgres" // ReplicationUser is the PostgreSQL role that will be created by Patroni // for streaming replication and for `pg_rewind`. ReplicationUser = "_crunchyrepl" + + // configMountPath is where to mount additional config files + configMountPath = "/etc/postgres" ) // ConfigDirectory returns the absolute path to $PGDATA for cluster. @@ -69,12 +92,17 @@ func DataDirectory(cluster *v1beta1.PostgresCluster) string { func WALDirectory( cluster *v1beta1.PostgresCluster, instance *v1beta1.PostgresInstanceSetSpec, ) string { - // When no WAL volume is specified, store WAL files on the main data volume. - walStorage := dataMountPath + return fmt.Sprintf("%s/pg%d_wal", WALStorage(instance), cluster.Spec.PostgresVersion) +} + +// WALStorage returns the absolute path to the disk where an instance stores its +// WAL files. Use [WALDirectory] for the exact directory that Postgres uses. +func WALStorage(instance *v1beta1.PostgresInstanceSetSpec) string { if instance.WALVolumeClaimSpec != nil { - walStorage = walMountPath + return walMountPath } - return fmt.Sprintf("%s/pg%d_wal", walStorage, cluster.Spec.PostgresVersion) + // When no WAL volume is specified, store WAL files on the main data volume. + return dataMountPath } // Environment returns the environment variables required to invoke PostgreSQL @@ -96,64 +124,283 @@ func Environment(cluster *v1beta1.PostgresCluster) []corev1.EnvVar { Name: "PGPORT", Value: fmt.Sprint(*cluster.Spec.Port), }, + // Setting the KRB5_CONFIG for kerberos + // - https://web.mit.edu/kerberos/krb5-current/doc/admin/conf_files/krb5_conf.html + { + Name: "KRB5_CONFIG", + Value: configMountPath + "/krb5.conf", + }, + // In testing it was determined that we need to set this env var for the replay cache + // otherwise it defaults to the read-only location `/var/tmp/` + // - https://web.mit.edu/kerberos/krb5-current/doc/basic/rcache_def.html#replay-cache-types + { + Name: "KRB5RCACHEDIR", + Value: "/tmp", + }, + // This allows a custom CA certificate to be mounted for Postgres LDAP + // authentication via spec.config.files. + // - https://wiki.postgresql.org/wiki/LDAP_Authentication_against_AD + // + // When setting the TLS_CACERT for LDAP as an environment variable, 'LDAP' + // must be appended as a prefix. + // - https://www.openldap.org/software/man.cgi?query=ldap.conf + // + // Testing with LDAPTLS_CACERTDIR did not work as expected during testing. + { + Name: "LDAPTLS_CACERT", + Value: configMountPath + "/ldap/ca.crt", + }, } } +// reloadCommand returns an entrypoint that convinces PostgreSQL to reload +// certificate files when they change. The process will appear as name in `ps` +// and `top`. +func reloadCommand(name string) []string { + // Use a Bash loop to periodically check the mtime of the mounted + // certificate volume. When it changes, copy the replication certificate, + // signal PostgreSQL, and print the observed timestamp. + // + // PostgreSQL v10 reads its server certificate files during reload (SIGHUP). + // - https://www.postgresql.org/docs/current/ssl-tcp.html#SSL-SERVER-FILES + // - https://www.postgresql.org/docs/current/app-postgres.html + // + // PostgreSQL reads its replication credentials every time it opens a + // replication connection. It does not need to be signaled when the + // certificate contents change. + // + // The copy is necessary because Kubernetes sets g+r when fsGroup is enabled, + // but PostgreSQL requires client keys to not be readable by other users. 
+ // - https://www.postgresql.org/docs/current/libpq-ssl.html + // - https://issue.k8s.io/57923 + // + // Coreutils `sleep` uses a lot of memory, so the following opens a file + // descriptor and uses the timeout of the builtin `read` to wait. That same + // descriptor gets closed and reopened to use the builtin `[ -nt` to check + // mtimes. + // - https://unix.stackexchange.com/a/407383 + script := fmt.Sprintf(` +# Parameters for curl when managing autogrow annotation. +APISERVER="https://kubernetes.default.svc" +SERVICEACCOUNT="/var/run/secrets/kubernetes.io/serviceaccount" +NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) +TOKEN=$(cat ${SERVICEACCOUNT}/token) +CACERT=${SERVICEACCOUNT}/ca.crt + +declare -r directory=%q +exec {fd}<> <(:||:) +while read -r -t 5 -u "${fd}" ||:; do + # Manage replication certificate. + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && + install -D --mode=0600 -t %q "${directory}"/{%s,%s,%s} && + pkill -HUP --exact --parent=1 postgres + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded certificates dated %%y' "${directory}" + fi + + # Manage autogrow annotation. + # Return size in Mebibytes. + size=$(df --human-readable --block-size=M /pgdata | awk 'FNR == 2 {print $2}') + use=$(df --human-readable /pgdata | awk 'FNR == 2 {print $5}') + sizeInt="${size//M/}" + # Use the sed punctuation class, because the shell will not accept the percent sign in an expansion. + useInt=$(echo $use | sed 's/[[:punct:]]//g') + triggerExpansion="$((useInt > 75))" + if [ $triggerExpansion -eq 1 ]; then + newSize="$(((sizeInt / 2)+sizeInt))" + newSizeMi="${newSize}Mi" + d='[{"op": "add", "path": "/metadata/annotations/suggested-pgdata-pvc-size", "value": "'"$newSizeMi"'"}]' + curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "$d" + fi +done +`, + naming.CertMountPath, + naming.ReplicationTmp, + naming.ReplicationCertPath, + naming.ReplicationPrivateKeyPath, + naming.ReplicationCACertPath, + ) + + // Elide the above script from `ps` and `top` by wrapping it in a function + // and calling that. + wrapper := `monitor() {` + script + `}; export -f monitor; exec -a "$0" bash -ceu monitor` + + return []string{"bash", "-ceu", "--", wrapper, name} +} + // startupCommand returns an entrypoint that prepares the filesystem for // PostgreSQL. func startupCommand( + ctx context.Context, cluster *v1beta1.PostgresCluster, instance *v1beta1.PostgresInstanceSetSpec, ) []string { version := fmt.Sprint(cluster.Spec.PostgresVersion) walDir := WALDirectory(cluster, instance) - args := []string{version, walDir} + // If the user requests tablespaces, we want to make sure the directories exist with the + // correct owner and permissions. + tablespaceCmd := "" + if feature.Enabled(ctx, feature.TablespaceVolumes) { + // This command checks if a dir exists and if not, creates it; + // if the dir does exist, then we `recreate` it to make sure the owner is correct; + // if the dir exists with the wrong owner and is not writeable, we error. + // This is the same behavior we use for the main PGDATA directory. + // Note: Postgres requires the tablespace directory to be "an existing, empty directory + // that is owned by the PostgreSQL operating system user." 
+ // - https://www.postgresql.org/docs/current/manage-ag-tablespaces.html + // However, unlike the PGDATA directory, Postgres will _not_ error out + // if the permissions are wrong on the tablespace directory. + // Instead, when a tablespace is created in Postgres, Postgres will `chmod` the + // tablespace directory to match permissions on the PGDATA directory (either 700 or 750). + // Postgres setting the tablespace directory permissions: + // - https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/backend/commands/tablespace.c;hb=REL_14_0#l600 + // Postgres choosing directory permissions: + // - https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/common/file_perm.c;hb=REL_14_0#l27 + // Note: This permission change seems to happen only when the tablespace is created in Postgres. + // If the user manually `chmod`'ed the directory after the creation of the tablespace, Postgres + // would not attempt to change the directory permissions. + // Note: as noted below, we mount the tablespace directory to the mountpoint `/tablespaces/NAME`, + // and so we add the subdirectory `data` in order to set the permissions. + checkInstallRecreateCmd := strings.Join([]string{ + `if [[ ! -e "${tablespace_dir}" || -O "${tablespace_dir}" ]]; then`, + `install --directory --mode=0700 "${tablespace_dir}"`, + `elif [[ -w "${tablespace_dir}" && -g "${tablespace_dir}" ]]; then`, + `recreate "${tablespace_dir}" '0700'`, + `else (halt Permissions!); fi ||`, + `halt "$(permissions "${tablespace_dir}" ||:)"`, + }, "\n") + + for _, tablespace := range instance.TablespaceVolumes { + // The path for tablespaces volumes is /tablespaces/NAME/data + // -- the `data` path is added so that we can arrange the permissions. + tablespaceCmd = tablespaceCmd + "\ntablespace_dir=/tablespaces/" + tablespace.Name + "/data" + "\n" + + checkInstallRecreateCmd + } + } + + pg_rewind_override := "" + if config.FetchKeyCommand(&cluster.Spec) != "" { + // Quoting "EOF" disables parameter substitution during write. + // - https://tldp.org/LDP/abs/html/here-docs.html#EX71C + pg_rewind_override = `cat << "EOF" > /tmp/pg_rewind_tde.sh +#!/bin/sh +pg_rewind -K "$(postgres -C encryption_key_command)" "$@" +EOF +chmod +x /tmp/pg_rewind_tde.sh +` + } + + args := []string{version, walDir, naming.PGBackRestPGDataLogPath} script := strings.Join([]string{ - `declare -r expected_major_version="$1" pgwal_directory="$2"`, + `declare -r expected_major_version="$1" pgwal_directory="$2" pgbrLog_directory="$3"`, + + // Function to print the permissions of a file or directory and its parents. + bashPermissions, + + // Function to print a message to stderr then exit non-zero. + bashHalt, // Function to log values in a basic structured format. `results() { printf '::postgres-operator: %s::%s\n' "$@"; }`, - // Function to change a directory symlink while keeping the directory content. + // Function to change the owner of an existing directory. + strings.TrimSpace(bashRecreateDirectory), + + // Function to change a directory symlink while keeping the directory contents. strings.TrimSpace(bashSafeLink), // Log the effective user ID and all the group IDs. `echo Initializing ...`, - `results 'uid' "$(id -u)" 'gid' "$(id -G)"`, + `results 'uid' "$(id -u ||:)" 'gid' "$(id -G ||:)"`, + + // The pgbackrest spool path should be co-located with wal. If a wal volume exists, symlink the spool-path to it. + `if [[ "${pgwal_directory}" == *"pgwal/"* ]] && [[ ! 
-d "/pgwal/pgbackrest-spool" ]];then rm -rf "/pgdata/pgbackrest-spool" && mkdir -p "/pgwal/pgbackrest-spool" && ln --force --symbolic "/pgwal/pgbackrest-spool" "/pgdata/pgbackrest-spool";fi`, + // When a pgwal volume is removed, the symlink will be broken; force pgbackrest to recreate spool-path. + `if [[ ! -e "/pgdata/pgbackrest-spool" ]];then rm -rf /pgdata/pgbackrest-spool;fi`, // Abort when the PostgreSQL version installed in the image does not // match the cluster spec. - `results 'postgres path' "$(command -v postgres)"`, - `results 'postgres version' "${postgres_version:=$(postgres --version)}"`, - `[[ "${postgres_version}" == *") ${expected_major_version}."* ]]`, + `results 'postgres path' "$(command -v postgres ||:)"`, + `results 'postgres version' "${postgres_version:=$(postgres --version ||:)}"`, + `[[ "${postgres_version}" =~ ") ${expected_major_version}"($|[^0-9]) ]] ||`, + `halt Expected PostgreSQL version "${expected_major_version}"`, // Abort when the configured data directory is not $PGDATA. // - https://www.postgresql.org/docs/current/runtime-config-file-locations.html `results 'config directory' "${PGDATA:?}"`, - `postgres_data_directory=$([ -d "${PGDATA}" ] && postgres -C data_directory || echo "${PGDATA}")`, + `postgres_data_directory=$([[ -d "${PGDATA}" ]] && postgres -C data_directory || echo "${PGDATA}")`, `results 'data directory' "${postgres_data_directory}"`, - `[ "${postgres_data_directory}" = "${PGDATA}" ]`, + `[[ "${postgres_data_directory}" == "${PGDATA}" ]] ||`, + `halt Expected matching config and data directories`, // Determine if the data directory has been prepared for bootstrapping the cluster `bootstrap_dir="${postgres_data_directory}_bootstrap"`, - `[ -d "${bootstrap_dir}" ] && results 'bootstrap directory' "${bootstrap_dir}"`, - `[ -d "${bootstrap_dir}" ] && postgres_data_directory="${bootstrap_dir}"`, + `[[ -d "${bootstrap_dir}" ]] && results 'bootstrap directory' "${bootstrap_dir}"`, + `[[ -d "${bootstrap_dir}" ]] && postgres_data_directory="${bootstrap_dir}"`, // PostgreSQL requires its directory to be writable by only itself. // Pod "securityContext.fsGroup" sets g+w on directories for *some* - // storage providers. + // storage providers. Ensure the current user owns the directory, and + // remove group permissions. // - https://www.postgresql.org/docs/current/creating-cluster.html - // - https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/backend/utils/init/miscinit.c;hb=REL_13_0#l319 + // - https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/backend/postmaster/postmaster.c;hb=REL_10_0#l1522 + // - https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/backend/utils/init/miscinit.c;hb=REL_14_0#l349 // - https://issue.k8s.io/93802#issuecomment-717646167 + // + // When the directory does not exist, create it with the correct permissions. + // When the directory has the correct owner, set the correct permissions. + `if [[ ! -e "${postgres_data_directory}" || -O "${postgres_data_directory}" ]]; then`, `install --directory --mode=0700 "${postgres_data_directory}"`, + // + // The directory exists but its owner is wrong. When it is writable, + // the set-group-ID bit indicates that "fsGroup" probably ran on its + // contents making them safe to use. In this case, we can make a new + // directory (owned by this user) and refill it. 
+ `elif [[ -w "${postgres_data_directory}" && -g "${postgres_data_directory}" ]]; then`, + `recreate "${postgres_data_directory}" '0700'`, + // + // The directory exists, its owner is wrong, and it is not writable. + `else (halt Permissions!); fi ||`, + `halt "$(permissions "${postgres_data_directory}" ||:)"`, + // Create the pgBackRest log directory. + `results 'pgBackRest log directory' "${pgbrLog_directory}"`, + `install --directory --mode=0775 "${pgbrLog_directory}" ||`, + `halt "$(permissions "${pgbrLog_directory}" ||:)"`, + + // Copy replication client certificate files + // from the /pgconf/tls/replication directory to the /tmp/replication directory in order + // to set proper file permissions. This is required because the group permission settings + // applied via the defaultMode option are not honored as expected, resulting in incorrect + // group read permissions. + // See https://github.com/kubernetes/kubernetes/issues/57923 + // TODO(tjmoore4): remove this implementation when/if defaultMode permissions are set as + // expected for the mounted volume. + fmt.Sprintf(`install -D --mode=0600 -t %q %q/{%s,%s,%s}`, + naming.ReplicationTmp, naming.CertMountPath+naming.ReplicationDirectory, + naming.ReplicationCert, naming.ReplicationPrivateKey, + naming.ReplicationCACert), + + // Add the pg_rewind wrapper script, if TDE is enabled. + pg_rewind_override, + + tablespaceCmd, // When the data directory is empty, there's nothing more to do. - `[ -f "${postgres_data_directory}/PG_VERSION" ] || exit 0`, + `[[ -f "${postgres_data_directory}/PG_VERSION" ]] || exit 0`, // Abort when the data directory is not empty and its version does not // match the cluster spec. `results 'data version' "${postgres_data_version:=$(< "${postgres_data_directory}/PG_VERSION")}"`, - `[ "${postgres_data_version}" = "${expected_major_version}" ]`, + `[[ "${postgres_data_version}" == "${expected_major_version}" ]] ||`, + `halt Expected PostgreSQL data version "${expected_major_version}"`, + + // For a restore from datasource: + // Patroni will complain if there's no `postgresql.conf` file + // and PGDATA may be missing that file if this is a restored database + // where the conf file was kept elsewhere. + `[[ ! -f "${postgres_data_directory}/postgresql.conf" ]] &&`, + `touch "${postgres_data_directory}/postgresql.conf"`, // Safely move the WAL directory onto the intended volume. PostgreSQL // always writes WAL files in the "pg_wal" directory inside the data @@ -164,7 +411,16 @@ func startupCommand( // - https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/bin/initdb/initdb.c;hb=REL_13_0#l2718 // - https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/bin/pg_basebackup/pg_basebackup.c;hb=REL_13_0#l2621 `safelink "${pgwal_directory}" "${postgres_data_directory}/pg_wal"`, - `results 'wal directory' "$(realpath "${postgres_data_directory}/pg_wal")"`, + `results 'wal directory' "$(realpath "${postgres_data_directory}/pg_wal" ||:)"`, + + // Early versions of PGO create replicas with a recovery signal file. + // Patroni also creates a standby signal file before starting Postgres, + // causing Postgres to remove only one, the standby. Remove the extra + // signal file now, if it exists, and let Patroni manage the standby + // signal file instead. + // - https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/backend/access/transam/xlog.c;hb=REL_12_0#l5318 + // TODO(cbandy): Remove this after 5.0 is EOL. 
+ `rm -f "${postgres_data_directory}/recovery.signal"`, }, "\n") return append([]string{"bash", "-ceu", "--", script, "startup"}, args...) diff --git a/internal/postgres/config_test.go b/internal/postgres/config_test.go index c1b29ced21..cd4c92d185 100644 --- a/internal/postgres/config_test.go +++ b/internal/postgres/config_test.go @@ -1,22 +1,14 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres import ( - "io/ioutil" + "bytes" + "context" + "errors" + "fmt" "os" "os/exec" "path/filepath" @@ -27,6 +19,8 @@ import ( corev1 "k8s.io/api/core/v1" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -57,10 +51,159 @@ func TestWALDirectory(t *testing.T) { assert.Equal(t, WALDirectory(cluster, instance), "/pgwal/pg13_wal") } +func TestBashHalt(t *testing.T) { + t.Run("NoPipeline", func(t *testing.T) { + cmd := exec.Command("bash") + cmd.Args = append(cmd.Args, "-c", "--", bashHalt+`; halt ab cd e`) + + var exit *exec.ExitError + stdout, err := cmd.Output() + assert.Assert(t, errors.As(err, &exit)) + assert.Equal(t, string(stdout), "", "expected no stdout") + assert.Equal(t, string(exit.Stderr), "ab cd e\n") + assert.Equal(t, exit.ExitCode(), 1) + }) + + t.Run("PipelineZeroStatus", func(t *testing.T) { + cmd := exec.Command("bash") + cmd.Args = append(cmd.Args, "-c", "--", bashHalt+`; true && halt message`) + + var exit *exec.ExitError + stdout, err := cmd.Output() + assert.Assert(t, errors.As(err, &exit)) + assert.Equal(t, string(stdout), "", "expected no stdout") + assert.Equal(t, string(exit.Stderr), "message\n") + assert.Equal(t, exit.ExitCode(), 1) + }) + + t.Run("PipelineNonZeroStatus", func(t *testing.T) { + cmd := exec.Command("bash") + cmd.Args = append(cmd.Args, "-c", "--", bashHalt+`; (exit 99) || halt $'multi\nline'`) + + var exit *exec.ExitError + stdout, err := cmd.Output() + assert.Assert(t, errors.As(err, &exit)) + assert.Equal(t, string(stdout), "", "expected no stdout") + assert.Equal(t, string(exit.Stderr), "multi\nline\n") + assert.Equal(t, exit.ExitCode(), 99) + }) + + t.Run("Subshell", func(t *testing.T) { + cmd := exec.Command("bash") + cmd.Args = append(cmd.Args, "-c", "--", bashHalt+`; (halt 'err') || echo 'after'`) + + stderr := new(bytes.Buffer) + cmd.Stderr = stderr + + stdout, err := cmd.Output() + assert.NilError(t, err) + assert.Equal(t, string(stdout), "after\n") + assert.Equal(t, stderr.String(), "err\n") + assert.Equal(t, cmd.ProcessState.ExitCode(), 0) + }) +} + +func TestBashPermissions(t *testing.T) { + // macOS `stat` takes different arguments than BusyBox and GNU coreutils. 
+ if output, err := exec.Command("stat", "--help").CombinedOutput(); err != nil { + t.Skip(`requires "stat" executable`) + } else if !strings.Contains(string(output), "%A") { + t.Skip(`requires "stat" with access format sequence`) + } + + dir := t.TempDir() + assert.NilError(t, os.Mkdir(filepath.Join(dir, "sub"), 0o751)) + assert.NilError(t, os.Chmod(filepath.Join(dir, "sub"), 0o751)) + assert.NilError(t, os.WriteFile(filepath.Join(dir, "sub", "fn"), nil, 0o624)) // #nosec G306 OK permissions for a temp dir in a test + assert.NilError(t, os.Chmod(filepath.Join(dir, "sub", "fn"), 0o624)) + + cmd := exec.Command("bash") + cmd.Args = append(cmd.Args, "-c", "--", + bashPermissions+`; permissions "$@"`, "-", + filepath.Join(dir, "sub", "fn")) + + stdout, err := cmd.Output() + assert.NilError(t, err) + assert.Assert(t, cmp.Regexp(``+ + `drwxr-x--x\s+\d+\s+\d+\s+[^ ]+/sub\n`+ + `-rw--w-r--\s+\d+\s+\d+\s+[^ ]+/sub/fn\n`+ + `$`, string(stdout))) +} + +func TestBashRecreateDirectory(t *testing.T) { + // macOS `stat` takes different arguments than BusyBox and GNU coreutils. + if output, err := exec.Command("stat", "--help").CombinedOutput(); err != nil { + t.Skip(`requires "stat" executable`) + } else if !strings.Contains(string(output), "%a") { + t.Skip(`requires "stat" with access format sequence`) + } + + dir := t.TempDir() + assert.NilError(t, os.Mkdir(filepath.Join(dir, "d"), 0o755)) + assert.NilError(t, os.WriteFile(filepath.Join(dir, "d", ".hidden"), nil, 0o644)) // #nosec G306 OK permissions for a temp dir in a test + assert.NilError(t, os.WriteFile(filepath.Join(dir, "d", "file"), nil, 0o644)) // #nosec G306 OK permissions for a temp dir in a test + + stat := func(args ...string) string { + cmd := exec.Command("stat", "-c", "%i %#a %N") + cmd.Args = append(cmd.Args, args...) + out, err := cmd.CombinedOutput() + + t.Helper() + assert.NilError(t, err, string(out)) + return string(out) + } + + var before, after struct{ d, f, dInode, dPerms string } + + before.d = stat(filepath.Join(dir, "d")) + before.f = stat( + filepath.Join(dir, "d", ".hidden"), + filepath.Join(dir, "d", "file"), + ) + + cmd := exec.Command("bash") + cmd.Args = append(cmd.Args, "-ceu", "--", + bashRecreateDirectory+` recreate "$@"`, "-", + filepath.Join(dir, "d"), "0740") + // The assertion below expects alphabetically sorted filenames. + // Set an empty environment to always use the default/standard locale. + cmd.Env = []string{} + output, err := cmd.CombinedOutput() + assert.NilError(t, err, string(output)) + assert.Assert(t, cmp.Regexp(`^`+ + `[+] chmod 0740 [^ ]+/tmp.[^ /]+\n`+ + `[+] mv [^ ]+/d/.hidden [^ ]+/d/file [^ ]+/tmp.[^ /]+\n`+ + `[+] rmdir [^ ]+/d\n`+ + `[+] mv [^ ]+/tmp.[^ /]+ [^ ]+/d\n`+ + `$`, string(output))) + + after.d = stat(filepath.Join(dir, "d")) + after.f = stat( + filepath.Join(dir, "d", ".hidden"), + filepath.Join(dir, "d", "file"), + ) + + _, err = fmt.Sscan(before.d, &before.dInode, &before.dPerms) + assert.NilError(t, err) + _, err = fmt.Sscan(after.d, &after.dInode, &after.dPerms) + assert.NilError(t, err) + + // New directory is new. + assert.Assert(t, after.dInode != before.dInode) + + // New directory has the requested permissions. + assert.Equal(t, after.dPerms, "0740") + + // Files are in the new directory and unchanged. + assert.DeepEqual(t, after.f, before.f) +} + func TestBashSafeLink(t *testing.T) { - // macOS lacks `realpath` which is part of GNU coreutils. 
- if _, err := exec.LookPath("realpath"); err != nil { - t.Skip(`requires "realpath" executable`) + // macOS `mv` takes different arguments than GNU coreutils. + if output, err := exec.Command("mv", "--help").CombinedOutput(); err != nil { + t.Skip(`requires "mv" executable`) + } else if !strings.Contains(string(output), "no-target-directory") { + t.Skip(`requires "mv" that overwrites a directory symlink`) } // execute calls the bash function with args. @@ -88,7 +231,7 @@ func TestBashSafeLink(t *testing.T) { // assertSetupContents ensures that directory contents match setupDirectory. assertSetupContents := func(t testing.TB, directory string) { t.Helper() - entries, err := ioutil.ReadDir(directory) + entries, err := os.ReadDir(directory) assert.NilError(t, err) assert.Equal(t, len(entries), 1) assert.Equal(t, entries[0].Name(), "original.file") @@ -169,14 +312,14 @@ func TestBashSafeLink(t *testing.T) { t.Helper() root = t.TempDir() current = filepath.Join(root, "original") - assert.NilError(t, ioutil.WriteFile(current, []byte(`treasure`), 0o600)) + assert.NilError(t, os.WriteFile(current, []byte(`treasure`), 0o600)) return } // assertSetupContents ensures that file contents match setupFile. assertSetupContents := func(t testing.TB, file string) { t.Helper() - content, err := ioutil.ReadFile(file) + content, err := os.ReadFile(file) assert.NilError(t, err) assert.Equal(t, string(content), `treasure`) } @@ -252,7 +395,7 @@ func TestBashSafeLink(t *testing.T) { assert.NilError(t, err, "expected symlink") assert.Equal(t, result, current) - entries, err := ioutil.ReadDir(current) + entries, err := os.ReadDir(current) assert.NilError(t, err) assert.Equal(t, len(entries), 1) assert.Equal(t, entries[0].Name(), "original.file") @@ -281,7 +424,7 @@ func TestBashSafeLink(t *testing.T) { assert.NilError(t, err, "expected symlink") assert.Equal(t, result, desired) - entries, err := ioutil.ReadDir(desired) + entries, err := os.ReadDir(desired) assert.NilError(t, err) assert.Equal(t, len(entries), 1) assert.Equal(t, entries[0].Name(), "original.file") @@ -310,40 +453,56 @@ func TestBashSafeLink(t *testing.T) { }) } -func TestBashSafeLinkPrettyYAML(t *testing.T) { - b, err := yaml.Marshal(bashSafeLink) - assert.NilError(t, err) - assert.Assert(t, strings.HasPrefix(string(b), `|`), - "expected literal block scalar, got:\n%s", b) -} - func TestStartupCommand(t *testing.T) { - shellcheck, err := exec.LookPath("shellcheck") - if err != nil { - t.Skip(`requires "shellcheck" executable`) - } else { - output, err := exec.Command(shellcheck, "--version").CombinedOutput() - assert.NilError(t, err) - t.Logf("using %q:\n%s", shellcheck, output) - } + shellcheck := require.ShellCheck(t) + t.Parallel() cluster := new(v1beta1.PostgresCluster) cluster.Spec.PostgresVersion = 13 instance := new(v1beta1.PostgresInstanceSetSpec) - command := startupCommand(cluster, instance) + ctx := context.Background() + command := startupCommand(ctx, cluster, instance) // Expect a bash command with an inline script. assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) assert.Assert(t, len(command) > 3) + script := command[3] // Write out that inline script. dir := t.TempDir() file := filepath.Join(dir, "script.bash") - assert.NilError(t, ioutil.WriteFile(file, []byte(command[3]), 0o600)) + assert.NilError(t, os.WriteFile(file, []byte(script), 0o600)) // Expect shellcheck to be happy. 
cmd := exec.Command(shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) + + t.Run("PrettyYAML", func(t *testing.T) { + b, err := yaml.Marshal(script) + assert.NilError(t, err) + assert.Assert(t, strings.HasPrefix(string(b), `|`), + "expected literal block scalar, got:\n%s", b) + }) + + t.Run("EnableTDE", func(t *testing.T) { + + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "encryption_key_command": "echo test", + }, + }, + }, + } + command := startupCommand(ctx, cluster, instance) + assert.Assert(t, len(command) > 3) + assert.Assert(t, strings.Contains(command[3], `cat << "EOF" > /tmp/pg_rewind_tde.sh +#!/bin/sh +pg_rewind -K "$(postgres -C encryption_key_command)" "$@" +EOF +chmod +x /tmp/pg_rewind_tde.sh`)) + }) } diff --git a/internal/postgres/databases.go b/internal/postgres/databases.go index 4c59c4bbef..0d70170527 100644 --- a/internal/postgres/databases.go +++ b/internal/postgres/databases.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -51,7 +40,7 @@ CREATE TEMPORARY TABLE input (id serial, data json); for i := range databases { if err == nil { - err = encoder.Encode(map[string]interface{}{ + err = encoder.Encode(map[string]any{ "database": databases[i], }) } diff --git a/internal/postgres/databases_test.go b/internal/postgres/databases_test.go index adcebc2b71..e025e86788 100644 --- a/internal/postgres/databases_test.go +++ b/internal/postgres/databases_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -19,26 +8,17 @@ import ( "context" "errors" "io" - "io/ioutil" "strings" "testing" "gotest.tools/v3/assert" - "gotest.tools/v3/assert/cmp" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" ) func TestCreateDatabasesInPostgreSQL(t *testing.T) { ctx := context.Background() - contains := func(actual, expected string) cmp.Comparison { - return func() cmp.Result { - if !strings.Contains(actual, expected) { - return cmp.DeepEqual(actual, expected)() - } - return cmp.ResultSuccess - } - } - t.Run("Arguments", func(t *testing.T) { expected := errors.New("pass-through") exec := func( @@ -59,7 +39,7 @@ func TestCreateDatabasesInPostgreSQL(t *testing.T) { ) error { calls++ - b, err := ioutil.ReadAll(stdin) + b, err := io.ReadAll(stdin) assert.NilError(t, err) assert.Equal(t, string(b), strings.TrimLeft(` SET search_path TO ''; @@ -93,9 +73,9 @@ SELECT pg_catalog.format('CREATE DATABASE %I', ) error { calls++ - b, err := ioutil.ReadAll(stdin) + b, err := io.ReadAll(stdin) assert.NilError(t, err) - assert.Assert(t, contains(string(b), ` + assert.Assert(t, cmp.Contains(string(b), ` \copy input (data) from stdin with (format text) {"database":"white space"} {"database":"eXaCtLy"} diff --git a/internal/postgres/doc.go b/internal/postgres/doc.go index a541dd4dc4..bd616b5916 100644 --- a/internal/postgres/doc.go +++ b/internal/postgres/doc.go @@ -1,19 +1,8 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + // Package postgres is a collection of resources that interact with PostgreSQL // or provide functionality that makes it easier for other resources to interact // with PostgreSQL. package postgres - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ diff --git a/internal/postgres/exec.go b/internal/postgres/exec.go index 613ed4e9d7..a846a8aa57 100644 --- a/internal/postgres/exec.go +++ b/internal/postgres/exec.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -52,6 +41,28 @@ func (exec Executor) Exec( return stdout.String(), stderr.String(), err } +// ExecInAllDatabases uses "bash" and "psql" to execute sql in every database +// that allows connections, including templates. The sql command(s) may contain +// psql variables that are assigned from the variables map. 
+// - https://www.postgresql.org/docs/current/app-psql.html#APP-PSQL-VARIABLES +func (exec Executor) ExecInAllDatabases( + ctx context.Context, sql string, variables map[string]string, +) (string, string, error) { + const databases = "" + + // Prevent unexpected dereferences by emptying "search_path". + // The "pg_catalog" schema is still searched. + // - https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-SEARCH-PATH + `SET search_path = '';` + + + // Return the names of databases that allow connections, including + // "template1". Exclude "template0" to ensure it is never manipulated. + // - https://www.postgresql.org/docs/current/managing-databases.html + `SELECT datname FROM pg_catalog.pg_database` + + ` WHERE datallowconn AND datname NOT IN ('template0')` + + return exec.ExecInDatabasesFromQuery(ctx, databases, sql, variables) +} + // ExecInDatabasesFromQuery uses "bash" and "psql" to execute sql in every // database returned by the databases query. The sql statement(s) may contain // psql variables that are assigned from the variables map. diff --git a/internal/postgres/exec_test.go b/internal/postgres/exec_test.go index 4eb4e63273..df9b862577 100644 --- a/internal/postgres/exec_test.go +++ b/internal/postgres/exec_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -19,13 +8,15 @@ import ( "context" "errors" "io" - "io/ioutil" + "os" "os/exec" "path/filepath" "strings" "testing" "gotest.tools/v3/assert" + + "github.com/crunchydata/postgres-operator/internal/testing/require" ) // This example demonstrates how Executor can work with exec.Cmd. 
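As a companion to the new `ExecInAllDatabases` helper above, here is a minimal usage sketch. It is not part of the patch: it backs an `Executor` with a local `os/exec` command (inside the operator this is normally an exec into the instance Pod), it assumes code living inside the operator module since the package is internal, and the SQL statement and psql variables are illustrative only.

```go
package main

import (
	"context"
	"io"
	"log"
	"os/exec"

	"github.com/crunchydata/postgres-operator/internal/postgres"
)

func main() {
	ctx := context.Background()

	// Back the Executor with a local command. Inside the operator this is
	// normally an exec into the instance Pod rather than os/exec.
	executor := postgres.Executor(func(
		ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string,
	) error {
		// command looks like ["bash", "-ceu", "--", script, "-", <databases SQL>, "--set=...", ...]
		cmd := exec.CommandContext(ctx, command[0], command[1:]...)
		cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr
		return cmd.Run()
	})

	// Run one statement in every database that allows connections; the
	// generated query excludes "template0" but includes "template1".
	_, stderr, err := executor.ExecInAllDatabases(ctx,
		`SELECT current_database();`,
		map[string]string{"ON_ERROR_STOP": "on", "QUIET": "on"},
	)
	if err != nil {
		log.Fatalln(err, stderr)
	}
}
```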
@@ -45,7 +36,7 @@ func TestExecutorExec(t *testing.T) { fn := func( _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - b, err := ioutil.ReadAll(stdin) + b, err := io.ReadAll(stdin) assert.NilError(t, err) assert.Equal(t, string(b), `statements; to run;`) @@ -75,12 +66,58 @@ func TestExecutorExec(t *testing.T) { assert.Equal(t, stderr, "and stderr") } +func TestExecutorExecInAllDatabases(t *testing.T) { + expected := errors.New("exact") + fn := func( + _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + b, err := io.ReadAll(stdin) + assert.NilError(t, err) + assert.Equal(t, string(b), `the; stuff;`) + + assert.DeepEqual(t, command, []string{ + "bash", "-ceu", "--", ` +sql_target=$(< /dev/stdin) +sql_databases="$1" +shift 1 + +databases=$(psql "$@" -Xw -Aqt --file=- <<< "${sql_databases}") +while IFS= read -r database; do + PGDATABASE="${database}" psql "$@" -Xw --file=- <<< "${sql_target}" +done <<< "${databases}" +`, + "-", + `SET search_path = '';SELECT datname FROM pg_catalog.pg_database WHERE datallowconn AND datname NOT IN ('template0')`, + "--set=CASE=sEnSiTiVe", + "--set=different=vars", + "--set=lots=of", + }) + + _, _ = io.WriteString(stdout, "some stdout") + _, _ = io.WriteString(stderr, "and stderr") + return expected + } + + stdout, stderr, err := Executor(fn).ExecInAllDatabases( + context.Background(), + `the; stuff;`, + map[string]string{ + "lots": "of", + "different": "vars", + "CASE": "sEnSiTiVe", + }) + + assert.Equal(t, expected, err, "expected function to be called") + assert.Equal(t, stdout, "some stdout") + assert.Equal(t, stderr, "and stderr") +} + func TestExecutorExecInDatabasesFromQuery(t *testing.T) { expected := errors.New("splat") fn := func( _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - b, err := ioutil.ReadAll(stdin) + b, err := io.ReadAll(stdin) assert.NilError(t, err) assert.Equal(t, string(b), `statements; to run;`) @@ -131,14 +168,7 @@ done <<< "${databases}" assert.Equal(t, stderr, "and stderr") t.Run("ShellCheck", func(t *testing.T) { - shellcheck, err := exec.LookPath("shellcheck") - if err != nil { - t.Skip(`requires "shellcheck" executable`) - } else { - output, err := exec.Command(shellcheck, "--version").CombinedOutput() - assert.NilError(t, err) - t.Logf("using %q:\n%s", shellcheck, output) - } + shellcheck := require.ShellCheck(t) _, _, _ = Executor(func( _ context.Context, _ io.Reader, _, _ io.Writer, command ...string, @@ -151,7 +181,7 @@ done <<< "${databases}" // Write out that inline script. dir := t.TempDir() file := filepath.Join(dir, "script.bash") - assert.NilError(t, ioutil.WriteFile(file, []byte(script), 0o600)) + assert.NilError(t, os.WriteFile(file, []byte(script), 0o600)) // Expect shellcheck to be happy. cmd := exec.Command(shellcheck, "--enable=all", file) diff --git a/internal/postgres/hba.go b/internal/postgres/hba.go index a653b213d7..d9b5ce2680 100644 --- a/internal/postgres/hba.go +++ b/internal/postgres/hba.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -129,7 +118,7 @@ func (hba *HostBasedAuthentication) Replication() *HostBasedAuthentication { // Role makes hba match connections by users that are members of a specific role. func (hba *HostBasedAuthentication) Role(name string) *HostBasedAuthentication { - hba.user = hba.quote("+" + name) + hba.user = "+" + hba.quote(name) return hba } diff --git a/internal/postgres/hba_test.go b/internal/postgres/hba_test.go index 269e72d855..9744479fdd 100644 --- a/internal/postgres/hba_test.go +++ b/internal/postgres/hba_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -20,7 +9,8 @@ import ( "testing" "gotest.tools/v3/assert" - "gotest.tools/v3/assert/cmp" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" ) func TestNewHBAs(t *testing.T) { @@ -62,7 +52,7 @@ func TestHostBasedAuthentication(t *testing.T) { User("KD6-3.7").Method("scram-sha-256"). String()) - assert.Equal(t, `hostssl "data" "+admin" all md5 clientcert="verify-ca"`, + assert.Equal(t, `hostssl "data" +"admin" all md5 clientcert="verify-ca"`, NewHBA().TLS().Database("data").Role("admin"). Method("md5").Options(map[string]string{"clientcert": "verify-ca"}). String()) diff --git a/internal/postgres/huge_pages.go b/internal/postgres/huge_pages.go new file mode 100644 index 0000000000..ee13c0d11b --- /dev/null +++ b/internal/postgres/huge_pages.go @@ -0,0 +1,43 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import ( + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// This function looks for a valid huge_pages resource request. If it finds one, +// it sets the PostgreSQL parameter "huge_pages" to "try". If it doesn't find +// one, it sets "huge_pages" to "off". 
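A hedged sketch of how these huge pages helpers are meant to be used; it is illustrative only, not part of huge_pages.go, mirrors the tests further below, and assumes code inside the operator module.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	"github.com/crunchydata/postgres-operator/internal/postgres"
	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

func main() {
	cluster := new(v1beta1.PostgresCluster)
	cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{
		Name: "instance1",
		Resources: corev1.ResourceRequirements{
			Limits: corev1.ResourceList{
				// Any positive hugepages-* limit counts as a request.
				corev1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("16Mi"),
			},
		},
	}}

	parameters := postgres.NewParameters()
	postgres.SetHugePages(cluster, &parameters)

	// Prints "try"; without a positive hugepages-* limit it prints "off".
	fmt.Println(parameters.Default.Value("huge_pages"))
}
```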
+func SetHugePages(cluster *v1beta1.PostgresCluster, pgParameters *Parameters) { + if HugePagesRequested(cluster) { + pgParameters.Default.Add("huge_pages", "try") + } else { + pgParameters.Default.Add("huge_pages", "off") + } +} + +// This helper function checks to see if a huge_pages value greater than zero has +// been set in any of the PostgresCluster's instances' resource specs +func HugePagesRequested(cluster *v1beta1.PostgresCluster) bool { + for _, instance := range cluster.Spec.InstanceSets { + for resourceName := range instance.Resources.Limits { + if strings.HasPrefix(resourceName.String(), corev1.ResourceHugePagesPrefix) { + resourceQuantity := instance.Resources.Limits.Name(resourceName, resource.BinarySI) + + if resourceQuantity != nil && resourceQuantity.Value() > 0 { + return true + } + } + } + } + + return false +} diff --git a/internal/postgres/huge_pages_test.go b/internal/postgres/huge_pages_test.go new file mode 100644 index 0000000000..58a6a6aa57 --- /dev/null +++ b/internal/postgres/huge_pages_test.go @@ -0,0 +1,98 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import ( + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestSetHugePages(t *testing.T) { + t.Run("hugepages not set at all", func(t *testing.T) { + cluster := new(v1beta1.PostgresCluster) + + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "test-instance1", + Replicas: initialize.Int32(1), + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + }, + }} + + pgParameters := NewParameters() + SetHugePages(cluster, &pgParameters) + + assert.Equal(t, pgParameters.Default.Has("huge_pages"), true) + assert.Equal(t, pgParameters.Default.Value("huge_pages"), "off") + }) + + t.Run("hugepages quantity not set", func(t *testing.T) { + cluster := new(v1beta1.PostgresCluster) + + emptyQuantity, _ := resource.ParseQuantity("") + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "test-instance1", + Replicas: initialize.Int32(1), + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceHugePagesPrefix + "2Mi": emptyQuantity, + }, + }, + }} + + pgParameters := NewParameters() + SetHugePages(cluster, &pgParameters) + + assert.Equal(t, pgParameters.Default.Has("huge_pages"), true) + assert.Equal(t, pgParameters.Default.Value("huge_pages"), "off") + }) + + t.Run("hugepages set to zero", func(t *testing.T) { + cluster := new(v1beta1.PostgresCluster) + + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "test-instance1", + Replicas: initialize.Int32(1), + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("0Mi"), + }, + }, + }} + + pgParameters := NewParameters() + SetHugePages(cluster, &pgParameters) + + assert.Equal(t, pgParameters.Default.Has("huge_pages"), true) + assert.Equal(t, pgParameters.Default.Value("huge_pages"), "off") + }) + + t.Run("hugepages set correctly", func(t *testing.T) { + cluster := new(v1beta1.PostgresCluster) + + cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ + Name: "test-instance1", + Replicas: initialize.Int32(1), + Resources: corev1.ResourceRequirements{ + Limits: 
corev1.ResourceList{ + corev1.ResourceHugePagesPrefix + "2Mi": resource.MustParse("16Mi"), + }, + }, + }} + + pgParameters := NewParameters() + SetHugePages(cluster, &pgParameters) + + assert.Equal(t, pgParameters.Default.Has("huge_pages"), true) + assert.Equal(t, pgParameters.Default.Value("huge_pages"), "try") + }) + +} diff --git a/internal/postgres/iana.go b/internal/postgres/iana.go new file mode 100644 index 0000000000..4392b549f1 --- /dev/null +++ b/internal/postgres/iana.go @@ -0,0 +1,16 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +// The protocol used by PostgreSQL is registered with the Internet Assigned +// Numbers Authority (IANA). +// - https://www.iana.org/assignments/service-names-port-numbers +const ( + // IANAPortNumber is the port assigned to PostgreSQL at the IANA. + IANAPortNumber = 5432 + + // IANAServiceName is the name of the PostgreSQL protocol at the IANA. + IANAServiceName = "postgresql" +) diff --git a/internal/postgres/parameters.go b/internal/postgres/parameters.go index d6fc8b9f58..434d9fd1dd 100644 --- a/internal/postgres/parameters.go +++ b/internal/postgres/parameters.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -96,6 +85,22 @@ func (ps *ParameterSet) Add(name, value string) { ps.values[ps.normalize(name)] = value } +// AppendToList adds each value to the right-hand side of parameter name +// as a comma-separated list without quoting. +func (ps *ParameterSet) AppendToList(name string, value ...string) { + result := ps.Value(name) + + if len(value) > 0 { + if len(result) > 0 { + result += "," + strings.Join(value, ",") + } else { + result = strings.Join(value, ",") + } + } + + ps.Add(name, result) +} + // Get returns the value of parameter name and whether or not it was present in ps. func (ps ParameterSet) Get(name string) (string, bool) { value, ok := ps.values[ps.normalize(name)] diff --git a/internal/postgres/parameters_test.go b/internal/postgres/parameters_test.go index 6e2c890b10..c6228d7958 100644 --- a/internal/postgres/parameters_test.go +++ b/internal/postgres/parameters_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -68,3 +57,26 @@ func TestParameterSet(t *testing.T) { ps2.Add("x", "n") assert.Assert(t, ps2.Value("x") != ps.Value("x")) } + +func TestParameterSetAppendToList(t *testing.T) { + ps := NewParameterSet() + + ps.AppendToList("empty") + assert.Assert(t, ps.Has("empty")) + assert.Equal(t, ps.Value("empty"), "") + + ps.AppendToList("empty") + assert.Equal(t, ps.Value("empty"), "", "expected no change") + + ps.AppendToList("full", "a") + assert.Equal(t, ps.Value("full"), "a") + + ps.AppendToList("full", "b") + assert.Equal(t, ps.Value("full"), "a,b") + + ps.AppendToList("full") + assert.Equal(t, ps.Value("full"), "a,b", "expected no change") + + ps.AppendToList("full", "a", "cd", `"e"`) + assert.Equal(t, ps.Value("full"), `a,b,a,cd,"e"`) +} diff --git a/internal/postgres/password/doc.go b/internal/postgres/password/doc.go index 2b8965ab27..eef7ed7db2 100644 --- a/internal/postgres/password/doc.go +++ b/internal/postgres/password/doc.go @@ -1,19 +1,7 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + // package password lets one create the appropriate password hashes and // verifiers that are used for adding the information into PostgreSQL - package password - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ diff --git a/internal/postgres/password/md5.go b/internal/postgres/password/md5.go index fe17062f71..884dfb655e 100644 --- a/internal/postgres/password/md5.go +++ b/internal/postgres/password/md5.go @@ -1,21 +1,11 @@ -package password - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package password import ( + // #nosec G501 "crypto/md5" "errors" diff --git a/internal/postgres/password/md5_test.go b/internal/postgres/password/md5_test.go index 4b220a934b..80cb7742d6 100644 --- a/internal/postgres/password/md5_test.go +++ b/internal/postgres/password/md5_test.go @@ -1,19 +1,8 @@ -package password - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package password import ( "fmt" diff --git a/internal/postgres/password/password.go b/internal/postgres/password/password.go index 104b9afdd1..337282cc74 100644 --- a/internal/postgres/password/password.go +++ b/internal/postgres/password/password.go @@ -1,19 +1,8 @@ -package password - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package password import ( "errors" diff --git a/internal/postgres/password/password_test.go b/internal/postgres/password/password_test.go index 1eeb7198ae..3401dec4ac 100644 --- a/internal/postgres/password/password_test.go +++ b/internal/postgres/password/password_test.go @@ -1,19 +1,8 @@ -package password - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package password import ( "errors" diff --git a/internal/postgres/password/scram.go b/internal/postgres/password/scram.go index 2f2980e6a9..8264cd87a0 100644 --- a/internal/postgres/password/scram.go +++ b/internal/postgres/password/scram.go @@ -1,19 +1,8 @@ -package password - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ +package password import ( "crypto/hmac" @@ -26,7 +15,7 @@ import ( "unicode" "unicode/utf8" - "github.com/xdg/stringprep" + "github.com/xdg-go/stringprep" "golang.org/x/crypto/pbkdf2" ) @@ -37,7 +26,7 @@ import ( // // where: // DIGEST = SCRAM-SHA-256 (only value for now in PostgreSQL) -// ITERATIONS = the number of iteratiosn to use for PBKDF2 +// ITERATIONS = the number of iterations to use for PBKDF2 // SALT = the salt used as part of the PBKDF2, stored in base64 // STORED_KEY = the hash of the client key, stored in base64 // SERVER_KEY = the hash of the server key @@ -162,10 +151,10 @@ func (s *SCRAMPassword) isASCII() bool { // using SCRAM. It differs from RFC 4013 in that it returns the original, // unmodified password when: // -// - the input is not valid UTF-8 -// - the output would be empty -// - the output would contain prohibited characters -// - the output would contain ambiguous bidirectional characters +// - the input is not valid UTF-8 +// - the output would be empty +// - the output would contain prohibited characters +// - the output would contain ambiguous bidirectional characters // // See: // @@ -179,7 +168,7 @@ func (s *SCRAMPassword) saslPrep() string { // perform SASLprep on the password. if the SASLprep fails or returns an // empty string, return the original password - // Otherwise return the clean pasword + // Otherwise return the clean password cleanedPassword, err := stringprep.SASLprep.Prepare(s.password) if cleanedPassword == "" || err != nil { return s.password diff --git a/internal/postgres/password/scram_test.go b/internal/postgres/password/scram_test.go index e46da3c1ec..0552e519b7 100644 --- a/internal/postgres/password/scram_test.go +++ b/internal/postgres/password/scram_test.go @@ -1,19 +1,8 @@ -package password - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package password import ( "bytes" diff --git a/internal/postgres/reconcile.go b/internal/postgres/reconcile.go index 23dc273b0b..344f91dd9f 100644 --- a/internal/postgres/reconcile.go +++ b/internal/postgres/reconcile.go @@ -1,157 +1,91 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgres import ( "context" - "fmt" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// InitCopyReplicationTLS copies the mounted client certificate, key and CA certificate files -// from the /pgconf/tls/replication directory to the /tmp/replication directory in order -// to set proper file permissions. This is required because the group permission settings -// applied via the defaultMode option are not honored as expected, resulting in incorrect -// group read permissions. -// See https://github.com/kubernetes/kubernetes/issues/57923 -// TODO(tjmoore4): remove this implementation when/if defaultMode permissions are set as -// expected for the mounted volume. -func InitCopyReplicationTLS(postgresCluster *v1beta1.PostgresCluster, - template *v1.PodTemplateSpec) { - - cmd := fmt.Sprintf(`mkdir -p %s && install -m 0600 %s/{%s,%s,%s} %s`, - naming.ReplicationTmp, naming.CertMountPath+naming.ReplicationDirectory, - naming.ReplicationCert, naming.ReplicationPrivateKey, - naming.ReplicationCACert, naming.ReplicationTmp) - template.Spec.InitContainers = append(template.Spec.InitContainers, - v1.Container{ - Command: []string{"bash", "-c", cmd}, - Image: config.PostgresContainerImage(postgresCluster), - Name: naming.ContainerClientCertInit, - SecurityContext: initialize.RestrictedSecurityContext(), - }) -} - -// AddCertVolumeToPod adds the secret containing the TLS certificate, key and the CA certificate -// as a volume to the provided Pod template spec, while also adding associated volume mounts to -// the database container specified. -func AddCertVolumeToPod(postgresCluster *v1beta1.PostgresCluster, template *v1.PodTemplateSpec, - initContainerName, dbContainerName, sidecarContainerName string, inClusterCertificates, - inClientCertificates *v1.SecretProjection) error { - - certVolume := v1.Volume{Name: naming.CertVolume} - certVolume.Projected = &v1.ProjectedVolumeSource{ - DefaultMode: initialize.Int32(0o600), - } - - // Add the certificate volume projection - certVolume.Projected.Sources = append(append( - certVolume.Projected.Sources, []v1.VolumeProjection(nil)...), - []v1.VolumeProjection{ - {Secret: inClusterCertificates}, - {Secret: inClientCertificates}}...) 
- - template.Spec.Volumes = append(template.Spec.Volumes, certVolume) - - var dbContainerFound bool - var sidecarContainerFound bool - var index int - for index = range template.Spec.Containers { - if template.Spec.Containers[index].Name == dbContainerName { - dbContainerFound = true - - template.Spec.Containers[index].VolumeMounts = - append(template.Spec.Containers[index].VolumeMounts, v1.VolumeMount{ - Name: naming.CertVolume, - MountPath: naming.CertMountPath, - ReadOnly: true, - }) - } - if template.Spec.Containers[index].Name == sidecarContainerName { - sidecarContainerFound = true - - template.Spec.Containers[index].VolumeMounts = - append(template.Spec.Containers[index].VolumeMounts, v1.VolumeMount{ - Name: naming.CertVolume, - MountPath: naming.CertMountPath, - ReadOnly: true, - }) - } - if dbContainerFound && sidecarContainerFound { - break - } - } - if !dbContainerFound { - return errors.Errorf("Unable to find container %q when adding certificate volumes", - dbContainerName) - } - if !sidecarContainerFound { - return errors.Errorf("Unable to find container %q when adding certificate volumes", - sidecarContainerName) - } - - var initContainerFound bool - var initIndex int - for initIndex = range template.Spec.InitContainers { - if template.Spec.InitContainers[initIndex].Name == initContainerName { - initContainerFound = true - break - } - } - if !initContainerFound { - return fmt.Errorf("Unable to find init container %q when adding certificate volumes", - initContainerName) - } - - template.Spec.InitContainers[initIndex].VolumeMounts = - append(template.Spec.InitContainers[initIndex].VolumeMounts, v1.VolumeMount{ - Name: naming.CertVolume, - MountPath: naming.CertMountPath, - ReadOnly: true, - }) - - return nil -} +var ( + oneMillicore = resource.MustParse("1m") + oneMebibyte = resource.MustParse("1Mi") +) // DataVolumeMount returns the name and mount path of the PostgreSQL data volume. func DataVolumeMount() corev1.VolumeMount { return corev1.VolumeMount{Name: "postgres-data", MountPath: dataMountPath} } +// TablespaceVolumeMount returns the name and mount path of the PostgreSQL tablespace data volume. +func TablespaceVolumeMount(tablespaceName string) corev1.VolumeMount { + return corev1.VolumeMount{Name: "tablespace-" + tablespaceName, MountPath: tablespaceMountPath + "/" + tablespaceName} +} + // WALVolumeMount returns the name and mount path of the PostgreSQL WAL volume. func WALVolumeMount() corev1.VolumeMount { return corev1.VolumeMount{Name: "postgres-wal", MountPath: walMountPath} } +// DownwardAPIVolumeMount returns the name and mount path of the DownwardAPI volume. +func DownwardAPIVolumeMount() corev1.VolumeMount { + return corev1.VolumeMount{ + Name: "database-containerinfo", + MountPath: downwardAPIPath, + ReadOnly: true, + } +} + +// AdditionalConfigVolumeMount returns the name and mount path of the additional config files. +func AdditionalConfigVolumeMount() corev1.VolumeMount { + return corev1.VolumeMount{ + Name: "postgres-config", + MountPath: configMountPath, + ReadOnly: true, + } +} + // InstancePod initializes outInstancePod with the database container and the // volumes needed by PostgreSQL. 
func InstancePod(ctx context.Context, inCluster *v1beta1.PostgresCluster, inInstanceSpec *v1beta1.PostgresInstanceSetSpec, + inClusterCertificates, inClientCertificates *corev1.SecretProjection, inDataVolume, inWALVolume *corev1.PersistentVolumeClaim, + inTablespaceVolumes []*corev1.PersistentVolumeClaim, outInstancePod *corev1.PodSpec, ) { + certVolumeMount := corev1.VolumeMount{ + Name: naming.CertVolume, + MountPath: naming.CertMountPath, + ReadOnly: true, + } + certVolume := corev1.Volume{ + Name: certVolumeMount.Name, + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + // PostgreSQL expects client certificate keys to not be readable + // by any other user. + // - https://www.postgresql.org/docs/current/libpq-ssl.html + DefaultMode: initialize.Int32(0o600), + Sources: []corev1.VolumeProjection{ + {Secret: inClusterCertificates}, + {Secret: inClientCertificates}, + }, + }, + }, + } + dataVolumeMount := DataVolumeMount() dataVolume := corev1.Volume{ Name: dataVolumeMount.Name, @@ -163,14 +97,69 @@ func InstancePod(ctx context.Context, }, } + downwardAPIVolumeMount := DownwardAPIVolumeMount() + downwardAPIVolume := corev1.Volume{ + Name: downwardAPIVolumeMount.Name, + VolumeSource: corev1.VolumeSource{ + DownwardAPI: &corev1.DownwardAPIVolumeSource{ + // The paths defined in Items (cpu_limit, cpu_request, etc.) + // are hard coded in the pgnodemx queries defined by + // pgMonitor configuration (queries_nodemx.yml) + // https://github.com/CrunchyData/pgmonitor/blob/master/postgres_exporter/common/queries_nodemx.yml + Items: []corev1.DownwardAPIVolumeFile{{ + Path: "cpu_limit", + ResourceFieldRef: &corev1.ResourceFieldSelector{ + ContainerName: naming.ContainerDatabase, + Resource: "limits.cpu", + Divisor: oneMillicore, + }, + }, { + Path: "cpu_request", + ResourceFieldRef: &corev1.ResourceFieldSelector{ + ContainerName: naming.ContainerDatabase, + Resource: "requests.cpu", + Divisor: oneMillicore, + }, + }, { + Path: "mem_limit", + ResourceFieldRef: &corev1.ResourceFieldSelector{ + ContainerName: naming.ContainerDatabase, + Resource: "limits.memory", + Divisor: oneMebibyte, + }, + }, { + Path: "mem_request", + ResourceFieldRef: &corev1.ResourceFieldSelector{ + ContainerName: naming.ContainerDatabase, + Resource: "requests.memory", + Divisor: oneMebibyte, + }, + }, { + Path: "labels", + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: corev1.SchemeGroupVersion.Version, + FieldPath: "metadata.labels", + }, + }, { + Path: "annotations", + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: corev1.SchemeGroupVersion.Version, + FieldPath: "metadata.annotations", + }, + }}, + }, + }, + } + container := corev1.Container{ Name: naming.ContainerDatabase, // Patroni will set the command and probes. 
- Env: Environment(inCluster), - Image: config.PostgresContainerImage(inCluster), - Resources: inInstanceSpec.Resources, + Env: Environment(inCluster), + Image: config.PostgresContainerImage(inCluster), + ImagePullPolicy: inCluster.Spec.ImagePullPolicy, + Resources: inInstanceSpec.Resources, Ports: []corev1.ContainerPort{{ Name: naming.PortPostgreSQL, @@ -178,23 +167,80 @@ func InstancePod(ctx context.Context, Protocol: corev1.ProtocolTCP, }}, - VolumeMounts: []corev1.VolumeMount{dataVolumeMount}, SecurityContext: initialize.RestrictedSecurityContext(), + VolumeMounts: []corev1.VolumeMount{ + certVolumeMount, + dataVolumeMount, + downwardAPIVolumeMount, + }, + } + + reloader := corev1.Container{ + Name: naming.ContainerClientCertCopy, + + Command: reloadCommand(naming.ContainerClientCertCopy), + + Image: container.Image, + ImagePullPolicy: container.ImagePullPolicy, + SecurityContext: initialize.RestrictedSecurityContext(), + + VolumeMounts: []corev1.VolumeMount{certVolumeMount, dataVolumeMount}, + } + + if inInstanceSpec.Sidecars != nil && + inInstanceSpec.Sidecars.ReplicaCertCopy != nil && + inInstanceSpec.Sidecars.ReplicaCertCopy.Resources != nil { + reloader.Resources = *inInstanceSpec.Sidecars.ReplicaCertCopy.Resources } startup := corev1.Container{ Name: naming.ContainerPostgresStartup, - Command: startupCommand(inCluster, inInstanceSpec), - Env: Environment(inCluster), - Image: config.PostgresContainerImage(inCluster), - Resources: inInstanceSpec.Resources, + Command: startupCommand(ctx, inCluster, inInstanceSpec), + Env: Environment(inCluster), + Image: container.Image, + ImagePullPolicy: container.ImagePullPolicy, + Resources: container.Resources, SecurityContext: initialize.RestrictedSecurityContext(), - VolumeMounts: []corev1.VolumeMount{dataVolumeMount}, + + VolumeMounts: []corev1.VolumeMount{certVolumeMount, dataVolumeMount}, } - outInstancePod.Volumes = []corev1.Volume{dataVolume} + outInstancePod.Volumes = []corev1.Volume{ + certVolume, + dataVolume, + downwardAPIVolume, + } + + // If `TablespaceVolumes` FeatureGate is enabled, `inTablespaceVolumes` may not be nil. + // In that case, add any tablespace volumes to the pod, and + // add volumeMounts to the database and startup containers + for _, vol := range inTablespaceVolumes { + tablespaceVolumeMount := TablespaceVolumeMount(vol.Labels[naming.LabelData]) + tablespaceVolume := corev1.Volume{ + Name: tablespaceVolumeMount.Name, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: vol.Name, + ReadOnly: false, + }, + }, + } + outInstancePod.Volumes = append(outInstancePod.Volumes, tablespaceVolume) + container.VolumeMounts = append(container.VolumeMounts, tablespaceVolumeMount) + startup.VolumeMounts = append(startup.VolumeMounts, tablespaceVolumeMount) + } + + if len(inCluster.Spec.Config.Files) != 0 { + additionalConfigVolumeMount := AdditionalConfigVolumeMount() + additionalConfigVolume := corev1.Volume{Name: additionalConfigVolumeMount.Name} + additionalConfigVolume.Projected = &corev1.ProjectedVolumeSource{ + Sources: append([]corev1.VolumeProjection{}, inCluster.Spec.Config.Files...), + } + container.VolumeMounts = append(container.VolumeMounts, additionalConfigVolumeMount) + outInstancePod.Volumes = append(outInstancePod.Volumes, additionalConfigVolume) + } // Mount the WAL PVC whenever it exists. The startup command will move WAL // files to or from this volume according to inInstanceSpec. 
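To make the expanded `InstancePod` signature concrete, here is a hedged usage sketch, not part of the patch. The `hippo-*` names and the PostgreSQL version are placeholders, a nil WAL volume and nil tablespace slice simply skip those mounts as described above, and the snippet assumes code inside the operator module.

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"

	"github.com/crunchydata/postgres-operator/internal/postgres"
	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

func main() {
	ctx := context.Background()

	cluster := new(v1beta1.PostgresCluster)
	cluster.Default()
	cluster.Spec.PostgresVersion = 15

	instance := new(v1beta1.PostgresInstanceSetSpec)

	// Projections of the cluster (server) and replication client certificate Secrets.
	serverCerts := &corev1.SecretProjection{
		LocalObjectReference: corev1.LocalObjectReference{Name: "hippo-cluster-cert"},
	}
	clientCerts := &corev1.SecretProjection{
		LocalObjectReference: corev1.LocalObjectReference{Name: "hippo-replication-cert"},
	}

	dataVolume := new(corev1.PersistentVolumeClaim)
	dataVolume.Name = "hippo-instance1-data"

	// Nil WAL volume and nil tablespace volumes: those mounts are skipped.
	pod := new(corev1.PodSpec)
	postgres.InstancePod(ctx, cluster, instance,
		serverCerts, clientCerts, dataVolume, nil, nil, pod)

	// pod.Containers now holds the database and replication-cert-copy containers,
	// pod.InitContainers the postgres-startup container, and pod.Volumes the
	// certificate, data, and downward API volumes.
}
```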
@@ -215,22 +261,29 @@ func InstancePod(ctx context.Context, outInstancePod.Volumes = append(outInstancePod.Volumes, walVolume) } - outInstancePod.Containers = []corev1.Container{container} + outInstancePod.Containers = []corev1.Container{container, reloader} + + // If the InstanceSidecars feature gate is enabled and instance sidecars are + // defined, add the defined container to the Pod. + if feature.Enabled(ctx, feature.InstanceSidecars) && + inInstanceSpec.Containers != nil { + outInstancePod.Containers = append(outInstancePod.Containers, inInstanceSpec.Containers...) + } + outInstancePod.InitContainers = []corev1.Container{startup} } // PodSecurityContext returns a v1.PodSecurityContext for cluster that can write // to PersistentVolumes. func PodSecurityContext(cluster *v1beta1.PostgresCluster) *corev1.PodSecurityContext { - podSecurityContext := initialize.RestrictedPodSecurityContext() + podSecurityContext := initialize.PodSecurityContext() // Use the specified supplementary groups except for root. The CRD has // similar validation, but we should never emit a PodSpec with that group. // - https://docs.k8s.io/concepts/security/pod-security-standards/ for i := range cluster.Spec.SupplementalGroups { if gid := cluster.Spec.SupplementalGroups[i]; gid > 0 { - podSecurityContext.SupplementalGroups = - append(podSecurityContext.SupplementalGroups, gid) + podSecurityContext.SupplementalGroups = append(podSecurityContext.SupplementalGroups, gid) } } diff --git a/internal/postgres/reconcile_test.go b/internal/postgres/reconcile_test.go index c667df8c7c..138b5c7b3e 100644 --- a/internal/postgres/reconcile_test.go +++ b/internal/postgres/reconcile_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -21,191 +10,115 @@ import ( "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -func TestCopyClientTLS(t *testing.T) { +func TestDataVolumeMount(t *testing.T) { + mount := DataVolumeMount() - postgresCluster := &v1beta1.PostgresCluster{ObjectMeta: metav1.ObjectMeta{Name: "hippo"}} - template := &v1.PodTemplateSpec{} + assert.DeepEqual(t, mount, corev1.VolumeMount{ + Name: "postgres-data", + MountPath: "/pgdata", + ReadOnly: false, + }) +} - InitCopyReplicationTLS(postgresCluster, template) +func TestWALVolumeMount(t *testing.T) { + mount := WALVolumeMount() - var foundPGDATAInitContainer bool - for _, c := range template.Spec.InitContainers { - if c.Name == naming.ContainerClientCertInit { - foundPGDATAInitContainer = true - break - } - } + assert.DeepEqual(t, mount, corev1.VolumeMount{ + Name: "postgres-wal", + MountPath: "/pgwal", + ReadOnly: false, + }) +} + +func TestDownwardAPIVolumeMount(t *testing.T) { + mount := DownwardAPIVolumeMount() - assert.Assert(t, foundPGDATAInitContainer) + assert.DeepEqual(t, mount, corev1.VolumeMount{ + Name: "database-containerinfo", + MountPath: "/etc/database-containerinfo", + ReadOnly: true, + }) } -func TestAddCertVolumeToPod(t *testing.T) { - - postgresCluster := &v1beta1.PostgresCluster{ObjectMeta: metav1.ObjectMeta{Name: "hippo"}} - template := &v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{{ - Name: "database", - }, { - Name: "replication-cert-copy", - }}, - InitContainers: []v1.Container{{ - Name: "database-client-cert-init", - }, +func TestTablespaceVolumeMount(t *testing.T) { + mount := TablespaceVolumeMount("trial") + + assert.DeepEqual(t, mount, corev1.VolumeMount{ + Name: "tablespace-trial", + MountPath: "/tablespaces/trial", + ReadOnly: false, + }) +} + +func TestInstancePod(t *testing.T) { + t.Parallel() + + ctx := context.Background() + cluster := new(v1beta1.PostgresCluster) + cluster.Default() + cluster.Spec.ImagePullPolicy = corev1.PullAlways + cluster.Spec.PostgresVersion = 11 + + dataVolume := new(corev1.PersistentVolumeClaim) + dataVolume.Name = "datavol" + + instance := new(v1beta1.PostgresInstanceSetSpec) + instance.Resources.Requests = corev1.ResourceList{"cpu": resource.MustParse("9m")} + instance.Sidecars = &v1beta1.InstanceSidecars{ + ReplicaCertCopy: &v1beta1.Sidecar{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{"cpu": resource.MustParse("21m")}, }, }, } - mode := int32(0o600) - // example auto-generated secret projection - testServerSecretProjection := &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{ - Name: naming.PostgresTLSSecret(postgresCluster).Name, - }, - Items: []v1.KeyToPath{ + + serverSecretProjection := &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: "srv-secret"}, + Items: []corev1.KeyToPath{ { Key: naming.ReplicationCert, Path: naming.ReplicationCert, - Mode: &mode, }, { Key: naming.ReplicationPrivateKey, Path: naming.ReplicationPrivateKey, - Mode: &mode, }, { Key: 
naming.ReplicationCACert, Path: naming.ReplicationCACert, - Mode: &mode, }, }, } - testClientSecretProjection := &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{ - Name: naming.ReplicationClientCertSecret(postgresCluster).Name, - }, - Items: []v1.KeyToPath{ + clientSecretProjection := &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: "repl-secret"}, + Items: []corev1.KeyToPath{ { Key: naming.ReplicationCert, Path: naming.ReplicationCertPath, - Mode: &mode, }, { Key: naming.ReplicationPrivateKey, Path: naming.ReplicationPrivateKeyPath, - Mode: &mode, }, }, } - err := AddCertVolumeToPod(postgresCluster, template, - naming.ContainerClientCertInit, naming.ContainerDatabase, - naming.ContainerClientCertCopy, testServerSecretProjection, - testClientSecretProjection) - assert.NilError(t, err) - - var foundCertVol bool - var certVol *v1.Volume - for i, v := range template.Spec.Volumes { - if v.Name == naming.CertVolume { - foundCertVol = true - certVol = &template.Spec.Volumes[i] - break - } - } - - assert.Assert(t, foundCertVol) - assert.Assert(t, len(certVol.Projected.Sources) > 1) - - var serverSecret *v1.SecretProjection - var clientSecret *v1.SecretProjection - - for _, source := range certVol.Projected.Sources { - - if source.Secret.Name == naming.PostgresTLSSecret(postgresCluster).Name { - serverSecret = source.Secret - } - if source.Secret.Name == naming.ReplicationClientCertSecret(postgresCluster).Name { - clientSecret = source.Secret - } - } - - if assert.Check(t, serverSecret != nil) { - assert.Assert(t, len(serverSecret.Items) == 3) - - assert.Equal(t, serverSecret.Items[0].Key, naming.ReplicationCert) - assert.Equal(t, serverSecret.Items[0].Path, naming.ReplicationCert) - assert.Equal(t, serverSecret.Items[0].Mode, &mode) - - assert.Equal(t, serverSecret.Items[1].Key, naming.ReplicationPrivateKey) - assert.Equal(t, serverSecret.Items[1].Path, naming.ReplicationPrivateKey) - assert.Equal(t, serverSecret.Items[1].Mode, &mode) - - assert.Equal(t, serverSecret.Items[2].Key, naming.ReplicationCACert) - assert.Equal(t, serverSecret.Items[2].Path, naming.ReplicationCACert) - assert.Equal(t, serverSecret.Items[2].Mode, &mode) - } - - if assert.Check(t, clientSecret != nil) { - assert.Assert(t, len(clientSecret.Items) == 2) - - assert.Equal(t, clientSecret.Items[0].Key, naming.ReplicationCert) - assert.Equal(t, clientSecret.Items[0].Path, naming.ReplicationCertPath) - assert.Equal(t, clientSecret.Items[0].Mode, &mode) - - assert.Equal(t, clientSecret.Items[1].Key, naming.ReplicationPrivateKey) - assert.Equal(t, clientSecret.Items[1].Path, naming.ReplicationPrivateKeyPath) - assert.Equal(t, clientSecret.Items[1].Mode, &mode) - } -} - -func TestDataVolumeMount(t *testing.T) { - mount := DataVolumeMount() - - assert.DeepEqual(t, mount, corev1.VolumeMount{ - Name: "postgres-data", - MountPath: "/pgdata", - ReadOnly: false, - }) -} - -func TestWALVolumeMount(t *testing.T) { - mount := WALVolumeMount() - - assert.DeepEqual(t, mount, corev1.VolumeMount{ - Name: "postgres-wal", - MountPath: "/pgwal", - ReadOnly: false, - }) -} - -func TestInstancePod(t *testing.T) { - ctx := context.Background() - - cluster := new(v1beta1.PostgresCluster) - cluster.Default() - cluster.Spec.PostgresVersion = 11 - - dataVolume := new(corev1.PersistentVolumeClaim) - dataVolume.Name = "datavol" - - instance := new(v1beta1.PostgresInstanceSetSpec) - instance.Resources.Requests = corev1.ResourceList{"cpu": resource.MustParse("9m")} - // without WAL volume nor WAL 
volume spec pod := new(corev1.PodSpec) - InstancePod(ctx, cluster, instance, dataVolume, nil, pod) + InstancePod(ctx, cluster, instance, + serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) - assert.Assert(t, marshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(pod, ` containers: - env: - name: PGDATA @@ -214,6 +127,13 @@ containers: value: /tmp/postgres - name: PGPORT value: "5432" + - name: KRB5_CONFIG + value: /etc/postgres/krb5.conf + - name: KRB5RCACHEDIR + value: /tmp + - name: LDAPTLS_CACERT + value: /etc/postgres/ldap/ca.crt + imagePullPolicy: Always name: database ports: - containerPort: 5432 @@ -224,10 +144,84 @@ containers: cpu: 9m securityContext: allowPrivilegeEscalation: false + capabilities: + drop: + - ALL privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: + - mountPath: /pgconf/tls + name: cert-volume + readOnly: true + - mountPath: /pgdata + name: postgres-data + - mountPath: /etc/database-containerinfo + name: database-containerinfo + readOnly: true +- command: + - bash + - -ceu + - -- + - |- + monitor() { + # Parameters for curl when managing autogrow annotation. + APISERVER="https://kubernetes.default.svc" + SERVICEACCOUNT="/var/run/secrets/kubernetes.io/serviceaccount" + NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) + TOKEN=$(cat ${SERVICEACCOUNT}/token) + CACERT=${SERVICEACCOUNT}/ca.crt + + declare -r directory="/pgconf/tls" + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + # Manage replication certificate. + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && + install -D --mode=0600 -t "/tmp/replication" "${directory}"/{replication/tls.crt,replication/tls.key,replication/ca.crt} && + pkill -HUP --exact --parent=1 postgres + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded certificates dated %y' "${directory}" + fi + + # Manage autogrow annotation. + # Return size in Mebibytes. + size=$(df --human-readable --block-size=M /pgdata | awk 'FNR == 2 {print $2}') + use=$(df --human-readable /pgdata | awk 'FNR == 2 {print $5}') + sizeInt="${size//M/}" + # Use the sed punctuation class, because the shell will not accept the percent sign in an expansion. 
+ useInt=$(echo $use | sed 's/[[:punct:]]//g') + triggerExpansion="$((useInt > 75))" + if [ $triggerExpansion -eq 1 ]; then + newSize="$(((sizeInt / 2)+sizeInt))" + newSizeMi="${newSize}Mi" + d='[{"op": "add", "path": "/metadata/annotations/suggested-pgdata-pvc-size", "value": "'"$newSizeMi"'"}]' + curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "$d" + fi + done + }; export -f monitor; exec -a "$0" bash -ceu monitor + - replication-cert-copy + imagePullPolicy: Always + name: replication-cert-copy + resources: + requests: + cpu: 21m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /pgconf/tls + name: cert-volume + readOnly: true - mountPath: /pgdata name: postgres-data initContainers: @@ -236,36 +230,62 @@ initContainers: - -ceu - -- - |- - declare -r expected_major_version="$1" pgwal_directory="$2" + declare -r expected_major_version="$1" pgwal_directory="$2" pgbrLog_directory="$3" + permissions() { while [[ -n "$1" ]]; do set "${1%/*}" "$@"; done; shift; stat -Lc '%A %4u %4g %n' "$@"; } + halt() { local rc=$?; >&2 echo "$@"; exit "${rc/#0/1}"; } results() { printf '::postgres-operator: %s::%s\n' "$@"; } + recreate() ( + local tmp; tmp=$(mktemp -d -p "${1%/*}"); GLOBIGNORE='.:..'; set -x + chmod "$2" "${tmp}"; mv "$1"/* "${tmp}"; rmdir "$1"; mv "${tmp}" "$1" + ) safelink() ( local desired="$1" name="$2" current current=$(realpath "${name}") - if [ "${current}" = "${desired}" ]; then return; fi + if [[ "${current}" == "${desired}" ]]; then return; fi set -x; mv --no-target-directory "${current}" "${desired}" ln --no-dereference --force --symbolic "${desired}" "${name}" ) echo Initializing ... - results 'uid' "$(id -u)" 'gid' "$(id -G)" - results 'postgres path' "$(command -v postgres)" - results 'postgres version' "${postgres_version:=$(postgres --version)}" - [[ "${postgres_version}" == *") ${expected_major_version}."* ]] + results 'uid' "$(id -u ||:)" 'gid' "$(id -G ||:)" + if [[ "${pgwal_directory}" == *"pgwal/"* ]] && [[ ! -d "/pgwal/pgbackrest-spool" ]];then rm -rf "/pgdata/pgbackrest-spool" && mkdir -p "/pgwal/pgbackrest-spool" && ln --force --symbolic "/pgwal/pgbackrest-spool" "/pgdata/pgbackrest-spool";fi + if [[ ! 
-e "/pgdata/pgbackrest-spool" ]];then rm -rf /pgdata/pgbackrest-spool;fi + results 'postgres path' "$(command -v postgres ||:)" + results 'postgres version' "${postgres_version:=$(postgres --version ||:)}" + [[ "${postgres_version}" =~ ") ${expected_major_version}"($|[^0-9]) ]] || + halt Expected PostgreSQL version "${expected_major_version}" results 'config directory' "${PGDATA:?}" - postgres_data_directory=$([ -d "${PGDATA}" ] && postgres -C data_directory || echo "${PGDATA}") + postgres_data_directory=$([[ -d "${PGDATA}" ]] && postgres -C data_directory || echo "${PGDATA}") results 'data directory' "${postgres_data_directory}" - [ "${postgres_data_directory}" = "${PGDATA}" ] + [[ "${postgres_data_directory}" == "${PGDATA}" ]] || + halt Expected matching config and data directories bootstrap_dir="${postgres_data_directory}_bootstrap" - [ -d "${bootstrap_dir}" ] && results 'bootstrap directory' "${bootstrap_dir}" - [ -d "${bootstrap_dir}" ] && postgres_data_directory="${bootstrap_dir}" + [[ -d "${bootstrap_dir}" ]] && results 'bootstrap directory' "${bootstrap_dir}" + [[ -d "${bootstrap_dir}" ]] && postgres_data_directory="${bootstrap_dir}" + if [[ ! -e "${postgres_data_directory}" || -O "${postgres_data_directory}" ]]; then install --directory --mode=0700 "${postgres_data_directory}" - [ -f "${postgres_data_directory}/PG_VERSION" ] || exit 0 + elif [[ -w "${postgres_data_directory}" && -g "${postgres_data_directory}" ]]; then + recreate "${postgres_data_directory}" '0700' + else (halt Permissions!); fi || + halt "$(permissions "${postgres_data_directory}" ||:)" + results 'pgBackRest log directory' "${pgbrLog_directory}" + install --directory --mode=0775 "${pgbrLog_directory}" || + halt "$(permissions "${pgbrLog_directory}" ||:)" + install -D --mode=0600 -t "/tmp/replication" "/pgconf/tls/replication"/{tls.crt,tls.key,ca.crt} + + + [[ -f "${postgres_data_directory}/PG_VERSION" ]] || exit 0 results 'data version' "${postgres_data_version:=$(< "${postgres_data_directory}/PG_VERSION")}" - [ "${postgres_data_version}" = "${expected_major_version}" ] + [[ "${postgres_data_version}" == "${expected_major_version}" ]] || + halt Expected PostgreSQL data version "${expected_major_version}" + [[ ! 
-f "${postgres_data_directory}/postgresql.conf" ]] && + touch "${postgres_data_directory}/postgresql.conf" safelink "${pgwal_directory}" "${postgres_data_directory}/pg_wal" - results 'wal directory' "$(realpath "${postgres_data_directory}/pg_wal")" + results 'wal directory' "$(realpath "${postgres_data_directory}/pg_wal" ||:)" + rm -f "${postgres_data_directory}/recovery.signal" - startup - "11" - /pgdata/pg11_wal + - /pgdata/pgbackrest/log env: - name: PGDATA value: /pgdata/pg11 @@ -273,22 +293,88 @@ initContainers: value: /tmp/postgres - name: PGPORT value: "5432" + - name: KRB5_CONFIG + value: /etc/postgres/krb5.conf + - name: KRB5RCACHEDIR + value: /tmp + - name: LDAPTLS_CACERT + value: /etc/postgres/ldap/ca.crt + imagePullPolicy: Always name: postgres-startup resources: requests: cpu: 9m securityContext: allowPrivilegeEscalation: false + capabilities: + drop: + - ALL privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: + - mountPath: /pgconf/tls + name: cert-volume + readOnly: true - mountPath: /pgdata name: postgres-data volumes: +- name: cert-volume + projected: + defaultMode: 384 + sources: + - secret: + items: + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key + - key: ca.crt + path: ca.crt + name: srv-secret + - secret: + items: + - key: tls.crt + path: replication/tls.crt + - key: tls.key + path: replication/tls.key + name: repl-secret - name: postgres-data persistentVolumeClaim: claimName: datavol +- downwardAPI: + items: + - path: cpu_limit + resourceFieldRef: + containerName: database + divisor: 1m + resource: limits.cpu + - path: cpu_request + resourceFieldRef: + containerName: database + divisor: 1m + resource: requests.cpu + - path: mem_limit + resourceFieldRef: + containerName: database + divisor: 1Mi + resource: limits.memory + - path: mem_request + resourceFieldRef: + containerName: database + divisor: 1Mi + resource: requests.memory + - fieldRef: + apiVersion: v1 + fieldPath: metadata.labels + path: labels + - fieldRef: + apiVersion: v1 + fieldPath: metadata.annotations + path: annotations + name: database-containerinfo `)) t.Run("WithWALVolumeWithoutWALVolumeSpec", func(t *testing.T) { @@ -296,25 +382,90 @@ volumes: walVolume.Name = "walvol" pod := new(corev1.PodSpec) - InstancePod(ctx, cluster, instance, dataVolume, walVolume, pod) + InstancePod(ctx, cluster, instance, + serverSecretProjection, clientSecretProjection, dataVolume, walVolume, nil, pod) - containers := pod.Containers[:0:0] - containers = append(containers, pod.Containers...) - containers = append(containers, pod.InitContainers...) 
+ assert.Assert(t, len(pod.Containers) > 0) + assert.Assert(t, len(pod.InitContainers) > 0) - for _, container := range containers { - assert.Assert(t, marshalMatches(container.VolumeMounts, ` + // Container has all mountPaths, including downwardAPI + assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` +- mountPath: /pgconf/tls + name: cert-volume + readOnly: true - mountPath: /pgdata name: postgres-data +- mountPath: /etc/database-containerinfo + name: database-containerinfo + readOnly: true - mountPath: /pgwal - name: postgres-wal - `), "expected WAL mount in %q container", container.Name) - } + name: postgres-wal`), "expected WAL and downwardAPI mounts in %q container", pod.Containers[0].Name) - assert.Assert(t, marshalMatches(pod.Volumes, ` + // InitContainer has all mountPaths, except downwardAPI + assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` +- mountPath: /pgconf/tls + name: cert-volume + readOnly: true +- mountPath: /pgdata + name: postgres-data +- mountPath: /pgwal + name: postgres-wal`), "expected WAL mount, no downwardAPI mount in %q container", pod.InitContainers[0].Name) + + assert.Assert(t, cmp.MarshalMatches(pod.Volumes, ` +- name: cert-volume + projected: + defaultMode: 384 + sources: + - secret: + items: + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key + - key: ca.crt + path: ca.crt + name: srv-secret + - secret: + items: + - key: tls.crt + path: replication/tls.crt + - key: tls.key + path: replication/tls.key + name: repl-secret - name: postgres-data persistentVolumeClaim: claimName: datavol +- downwardAPI: + items: + - path: cpu_limit + resourceFieldRef: + containerName: database + divisor: 1m + resource: limits.cpu + - path: cpu_request + resourceFieldRef: + containerName: database + divisor: 1m + resource: requests.cpu + - path: mem_limit + resourceFieldRef: + containerName: database + divisor: 1Mi + resource: limits.memory + - path: mem_request + resourceFieldRef: + containerName: database + divisor: 1Mi + resource: requests.memory + - fieldRef: + apiVersion: v1 + fieldPath: metadata.labels + path: labels + - fieldRef: + apiVersion: v1 + fieldPath: metadata.annotations + path: annotations + name: database-containerinfo - name: postgres-wal persistentVolumeClaim: claimName: walvol @@ -322,7 +473,137 @@ volumes: // Startup moves WAL files to data volume. 
assert.DeepEqual(t, pod.InitContainers[0].Command[4:], - []string{"startup", "11", "/pgdata/pg11_wal"}) + []string{"startup", "11", "/pgdata/pg11_wal", "/pgdata/pgbackrest/log"}) + }) + + t.Run("WithAdditionalConfigFiles", func(t *testing.T) { + clusterWithConfig := cluster.DeepCopy() + clusterWithConfig.Spec.Config.Files = []corev1.VolumeProjection{ + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "keytab", + }, + }, + }, + } + + pod := new(corev1.PodSpec) + InstancePod(ctx, clusterWithConfig, instance, + serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) + + assert.Assert(t, len(pod.Containers) > 0) + assert.Assert(t, len(pod.InitContainers) > 0) + + // Container has all mountPaths, including downwardAPI, + // and the postgres-config + assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` +- mountPath: /pgconf/tls + name: cert-volume + readOnly: true +- mountPath: /pgdata + name: postgres-data +- mountPath: /etc/database-containerinfo + name: database-containerinfo + readOnly: true +- mountPath: /etc/postgres + name: postgres-config + readOnly: true`), "expected WAL and downwardAPI mounts in %q container", pod.Containers[0].Name) + + // InitContainer has all mountPaths, except downwardAPI and additionalConfig + assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` +- mountPath: /pgconf/tls + name: cert-volume + readOnly: true +- mountPath: /pgdata + name: postgres-data`), "expected WAL mount, no downwardAPI mount in %q container", pod.InitContainers[0].Name) + }) + + t.Run("WithCustomSidecarContainer", func(t *testing.T) { + sidecarInstance := new(v1beta1.PostgresInstanceSetSpec) + sidecarInstance.Containers = []corev1.Container{ + {Name: "customsidecar1"}, + } + + t.Run("SidecarNotEnabled", func(t *testing.T) { + InstancePod(ctx, cluster, sidecarInstance, + serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) + + assert.Equal(t, len(pod.Containers), 2, "expected 2 containers in Pod, got %d", len(pod.Containers)) + }) + + t.Run("SidecarEnabled", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.InstanceSidecars: true, + })) + ctx := feature.NewContext(ctx, gate) + + InstancePod(ctx, cluster, sidecarInstance, + serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) + + assert.Equal(t, len(pod.Containers), 3, "expected 3 containers in Pod, got %d", len(pod.Containers)) + + var found bool + for i := range pod.Containers { + if pod.Containers[i].Name == "customsidecar1" { + found = true + break + } + } + assert.Assert(t, found, "expected custom sidecar 'customsidecar1', but container not found") + }) + }) + + t.Run("WithTablespaces", func(t *testing.T) { + clusterWithTablespaces := cluster.DeepCopy() + clusterWithTablespaces.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{ + { + TablespaceVolumes: []v1beta1.TablespaceVolume{ + {Name: "trial"}, + {Name: "castle"}, + }, + }, + } + + tablespaceVolume1 := new(corev1.PersistentVolumeClaim) + tablespaceVolume1.Labels = map[string]string{ + "postgres-operator.crunchydata.com/data": "castle", + } + tablespaceVolume2 := new(corev1.PersistentVolumeClaim) + tablespaceVolume2.Labels = map[string]string{ + "postgres-operator.crunchydata.com/data": "trial", + } + tablespaceVolumes := []*corev1.PersistentVolumeClaim{tablespaceVolume1, tablespaceVolume2} + + InstancePod(ctx, cluster, instance, + serverSecretProjection, 
clientSecretProjection, dataVolume, nil, tablespaceVolumes, pod) + + assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` +- mountPath: /pgconf/tls + name: cert-volume + readOnly: true +- mountPath: /pgdata + name: postgres-data +- mountPath: /etc/database-containerinfo + name: database-containerinfo + readOnly: true +- mountPath: /tablespaces/castle + name: tablespace-castle +- mountPath: /tablespaces/trial + name: tablespace-trial`), "expected tablespace mount(s) in %q container", pod.Containers[0].Name) + + // InitContainer has all mountPaths, except downwardAPI and additionalConfig + assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` +- mountPath: /pgconf/tls + name: cert-volume + readOnly: true +- mountPath: /pgdata + name: postgres-data +- mountPath: /tablespaces/castle + name: tablespace-castle +- mountPath: /tablespaces/trial + name: tablespace-trial`), "expected tablespace mount(s) in %q container", pod.InitContainers[0].Name) }) t.Run("WithWALVolumeWithWALVolumeSpec", func(t *testing.T) { @@ -333,25 +614,88 @@ volumes: instance.WALVolumeClaimSpec = new(corev1.PersistentVolumeClaimSpec) pod := new(corev1.PodSpec) - InstancePod(ctx, cluster, instance, dataVolume, walVolume, pod) + InstancePod(ctx, cluster, instance, + serverSecretProjection, clientSecretProjection, dataVolume, walVolume, nil, pod) - containers := pod.Containers[:0:0] - containers = append(containers, pod.Containers...) - containers = append(containers, pod.InitContainers...) + assert.Assert(t, len(pod.Containers) > 0) + assert.Assert(t, len(pod.InitContainers) > 0) - for _, container := range containers { - assert.Assert(t, marshalMatches(container.VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` +- mountPath: /pgconf/tls + name: cert-volume + readOnly: true - mountPath: /pgdata name: postgres-data +- mountPath: /etc/database-containerinfo + name: database-containerinfo + readOnly: true - mountPath: /pgwal - name: postgres-wal - `), "expected WAL mount in %s", container.Name) - } + name: postgres-wal`), "expected WAL and downwardAPI mounts in %q container", pod.Containers[0].Name) - assert.Assert(t, marshalMatches(pod.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` +- mountPath: /pgconf/tls + name: cert-volume + readOnly: true +- mountPath: /pgdata + name: postgres-data +- mountPath: /pgwal + name: postgres-wal`), "expected WAL mount, no downwardAPI mount in %q container", pod.InitContainers[0].Name) + + assert.Assert(t, cmp.MarshalMatches(pod.Volumes, ` +- name: cert-volume + projected: + defaultMode: 384 + sources: + - secret: + items: + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key + - key: ca.crt + path: ca.crt + name: srv-secret + - secret: + items: + - key: tls.crt + path: replication/tls.crt + - key: tls.key + path: replication/tls.key + name: repl-secret - name: postgres-data persistentVolumeClaim: claimName: datavol +- downwardAPI: + items: + - path: cpu_limit + resourceFieldRef: + containerName: database + divisor: 1m + resource: limits.cpu + - path: cpu_request + resourceFieldRef: + containerName: database + divisor: 1m + resource: requests.cpu + - path: mem_limit + resourceFieldRef: + containerName: database + divisor: 1Mi + resource: limits.memory + - path: mem_request + resourceFieldRef: + containerName: database + divisor: 1Mi + resource: requests.memory + - fieldRef: + apiVersion: v1 + fieldPath: metadata.labels + path: labels + - fieldRef: + apiVersion: v1 + 
fieldPath: metadata.annotations + path: annotations + name: database-containerinfo - name: postgres-wal persistentVolumeClaim: claimName: walvol @@ -359,7 +703,7 @@ volumes: // Startup moves WAL files to WAL volume. assert.DeepEqual(t, pod.InitContainers[0].Command[4:], - []string{"startup", "11", "/pgwal/pg11_wal"}) + []string{"startup", "11", "/pgwal/pg11_wal", "/pgdata/pgbackrest/log"}) }) } @@ -367,33 +711,33 @@ func TestPodSecurityContext(t *testing.T) { cluster := new(v1beta1.PostgresCluster) cluster.Default() - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` fsGroup: 26 -runAsNonRoot: true +fsGroupChangePolicy: OnRootMismatch `)) cluster.Spec.OpenShift = initialize.Bool(true) - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` -runAsNonRoot: true + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` +fsGroupChangePolicy: OnRootMismatch `)) cluster.Spec.SupplementalGroups = []int64{} - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` -runAsNonRoot: true + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` +fsGroupChangePolicy: OnRootMismatch `)) cluster.Spec.SupplementalGroups = []int64{999, 65000} - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` -runAsNonRoot: true + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` +fsGroupChangePolicy: OnRootMismatch supplementalGroups: - 999 - 65000 `)) *cluster.Spec.OpenShift = false - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` fsGroup: 26 -runAsNonRoot: true +fsGroupChangePolicy: OnRootMismatch supplementalGroups: - 999 - 65000 diff --git a/internal/postgres/users.go b/internal/postgres/users.go index 6f8895f330..be8785a4e5 100644 --- a/internal/postgres/users.go +++ b/internal/postgres/users.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -19,17 +8,57 @@ import ( "bytes" "context" "encoding/json" + "strings" + + pg_query "github.com/pganalyze/pg_query_go/v5" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +var RESERVED_SCHEMA_NAMES = map[string]bool{ + "public": true, // This is here for documentation; Postgres will reject a role named `public` as reserved + "pgbouncer": true, + "monitor": true, +} + +func sanitizeAlterRoleOptions(options string) string { + const AlterRolePrefix = `ALTER ROLE "any" WITH ` + + // Parse the options and discard them completely when incoherent. 
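	// Example behavior, as exercised by TestSanitizeAlterRoleOptions later in this patch:
	//   " login password '' replication "  ->  "LOGIN REPLICATION"  (parses; PASSWORD dropped)
	//   "password 'anything'"              ->  ""                   (parses; nothing left after dropping PASSWORD)
	//   "login /*"                         ->  ""                   (does not parse; discarded entirely)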
+ parsed, err := pg_query.Parse(AlterRolePrefix + options) + if err != nil || len(parsed.GetStmts()) != 1 { + return "" + } + + // Rebuild the options list without invalid options. TODO(go1.21) TODO(slices) + orig := parsed.GetStmts()[0].GetStmt().GetAlterRoleStmt().GetOptions() + next := make([]*pg_query.Node, 0, len(orig)) + for i, option := range orig { + if strings.EqualFold(option.GetDefElem().GetDefname(), "password") { + continue + } + next = append(next, orig[i]) + } + if len(next) > 0 { + parsed.GetStmts()[0].GetStmt().GetAlterRoleStmt().Options = next + } else { + return "" + } + + // Turn the modified statement back into SQL and remove the ALTER ROLE portion. + sql, _ := pg_query.Deparse(parsed) + return strings.TrimPrefix(sql, AlterRolePrefix) +} + // WriteUsersInPostgreSQL calls exec to create users that do not exist in // PostgreSQL. Once they exist, it updates their options and passwords and // grants them access to their specified databases. The databases must already // exist. func WriteUsersInPostgreSQL( - ctx context.Context, exec Executor, + ctx context.Context, cluster *v1beta1.PostgresCluster, exec Executor, users []v1beta1.PostgresUserSpec, verifiers map[string]string, ) error { log := logging.FromContext(ctx) @@ -56,7 +85,7 @@ CREATE TEMPORARY TABLE input (id serial, data json); spec := users[i] databases := spec.Databases - options := spec.Options + options := sanitizeAlterRoleOptions(spec.Options) // The "postgres" user must always be a superuser that can login to // the "postgres" database. @@ -66,7 +95,7 @@ CREATE TEMPORARY TABLE input (id serial, data json); } if err == nil { - err = encoder.Encode(map[string]interface{}{ + err = encoder.Encode(map[string]any{ "databases": databases, "options": options, "username": spec.Name, @@ -130,5 +159,83 @@ SELECT pg_catalog.format('GRANT ALL PRIVILEGES ON DATABASE %I TO %I', log.V(1).Info("wrote PostgreSQL users", "stdout", stdout, "stderr", stderr) + // The operator will attempt to write schemas for the users in the spec if + // * the feature gate is enabled and + // * the cluster is annotated. + if feature.Enabled(ctx, feature.AutoCreateUserSchema) { + autoCreateUserSchemaAnnotationValue, annotationExists := cluster.Annotations[naming.AutoCreateUserSchemaAnnotation] + if annotationExists && strings.EqualFold(autoCreateUserSchemaAnnotationValue, "true") { + log.V(1).Info("Writing schemas for users.") + err = WriteUsersSchemasInPostgreSQL(ctx, exec, users) + } + } + + return err +} + +// WriteUsersSchemasInPostgreSQL will create a schema for each user in each database that user has access to +func WriteUsersSchemasInPostgreSQL(ctx context.Context, exec Executor, + users []v1beta1.PostgresUserSpec) error { + + log := logging.FromContext(ctx) + + var err error + var stdout string + var stderr string + + for i := range users { + spec := users[i] + + // We skip if the user has the name of a reserved schema + if RESERVED_SCHEMA_NAMES[string(spec.Name)] { + log.V(1).Info("Skipping schema creation for user with reserved name", + "name", string(spec.Name)) + continue + } + + // We skip if the user has no databases + if len(spec.Databases) == 0 { + continue + } + + var sql bytes.Buffer + + // Prevent unexpected dereferences by emptying "search_path". The "pg_catalog" + // schema is still searched, and only temporary objects can be created. 
+ // - https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-SEARCH-PATH + _, _ = sql.WriteString(`SET search_path TO '';`) + + _, _ = sql.WriteString(`SELECT * FROM json_array_elements_text(:'databases');`) + + databases, _ := json.Marshal(spec.Databases) + + stdout, stderr, err = exec.ExecInDatabasesFromQuery(ctx, + sql.String(), + strings.Join([]string{ + // Quiet NOTICE messages from IF EXISTS statements. + // - https://www.postgresql.org/docs/current/runtime-config-client.html + `SET client_min_messages = WARNING;`, + + // Creates a schema named after and owned by the user + // - https://www.postgresql.org/docs/current/ddl-schemas.html + // - https://www.postgresql.org/docs/current/sql-createschema.html + + // We create a schema named after the user because + // the PG search_path does not need to be updated, + // since search_path defaults to "$user", public. + // - https://www.postgresql.org/docs/current/ddl-schemas.html#DDL-SCHEMAS-PATH + `CREATE SCHEMA IF NOT EXISTS :"username" AUTHORIZATION :"username";`, + }, "\n"), + map[string]string{ + "databases": string(databases), + "username": string(spec.Name), + + "ON_ERROR_STOP": "on", // Abort when any one statement fails. + "QUIET": "on", // Do not print successful commands to stdout. + }, + ) + + log.V(1).Info("wrote PostgreSQL schemas", "stdout", stdout, "stderr", stderr) + } return err } diff --git a/internal/postgres/users_test.go b/internal/postgres/users_test.go index 93500275dd..141175c78e 100644 --- a/internal/postgres/users_test.go +++ b/internal/postgres/users_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -19,28 +8,37 @@ import ( "context" "errors" "io" - "io/ioutil" + "regexp" "strings" "testing" "gotest.tools/v3/assert" - "gotest.tools/v3/assert/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +func TestSanitizeAlterRoleOptions(t *testing.T) { + assert.Equal(t, sanitizeAlterRoleOptions(""), "") + assert.Equal(t, sanitizeAlterRoleOptions(" login other stuff"), "", + "expected non-options to be removed") + + t.Run("RemovesPassword", func(t *testing.T) { + assert.Equal(t, sanitizeAlterRoleOptions("password 'anything'"), "") + assert.Equal(t, sanitizeAlterRoleOptions("password $wild$ dollar quoting $wild$ login"), "LOGIN") + assert.Equal(t, sanitizeAlterRoleOptions(" login password '' replication "), "LOGIN REPLICATION") + }) + + t.Run("RemovesComments", func(t *testing.T) { + assert.Equal(t, sanitizeAlterRoleOptions("login -- asdf"), "LOGIN") + assert.Equal(t, sanitizeAlterRoleOptions("login /*"), "") + assert.Equal(t, sanitizeAlterRoleOptions("login /* createdb */ createrole"), "LOGIN CREATEROLE") + }) +} + func TestWriteUsersInPostgreSQL(t *testing.T) { ctx := context.Background() - contains := func(actual, expected string) cmp.Comparison { - return func() cmp.Result { - if !strings.Contains(actual, expected) { - return cmp.DeepEqual(actual, expected)() - } - return cmp.ResultSuccess - } - } - t.Run("Arguments", func(t *testing.T) { expected := errors.New("pass-through") exec := func( @@ -51,7 +49,8 @@ func TestWriteUsersInPostgreSQL(t *testing.T) { return expected } - assert.Equal(t, expected, WriteUsersInPostgreSQL(ctx, exec, nil, nil)) + cluster := new(v1beta1.PostgresCluster) + assert.Equal(t, expected, WriteUsersInPostgreSQL(ctx, cluster, exec, nil, nil)) }) t.Run("Empty", func(t *testing.T) { @@ -61,7 +60,7 @@ func TestWriteUsersInPostgreSQL(t *testing.T) { ) error { calls++ - b, err := ioutil.ReadAll(stdin) + b, err := io.ReadAll(stdin) assert.NilError(t, err) assert.Equal(t, string(b), strings.TrimSpace(` SET search_path TO ''; @@ -96,36 +95,39 @@ COMMIT;`)) return nil } - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, nil, nil)) + cluster := new(v1beta1.PostgresCluster) + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, nil, nil)) assert.Equal(t, calls, 1) - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, []v1beta1.PostgresUserSpec{}, nil)) + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, []v1beta1.PostgresUserSpec{}, nil)) assert.Equal(t, calls, 2) - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, nil, map[string]string{})) + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, nil, map[string]string{})) assert.Equal(t, calls, 3) }) t.Run("OptionalFields", func(t *testing.T) { + cluster := new(v1beta1.PostgresCluster) calls := 0 exec := func( _ context.Context, stdin io.Reader, _, _ io.Writer, command ...string, ) error { calls++ - b, err := ioutil.ReadAll(stdin) + b, err := io.ReadAll(stdin) assert.NilError(t, err) - assert.Assert(t, contains(string(b), ` + assert.Assert(t, cmp.Contains(string(b), ` \copy input (data) from stdin with (format text) {"databases":["db1"],"options":"","username":"user-no-options","verifier":""} -{"databases":null,"options":"some options here","username":"user-no-databases","verifier":""} +{"databases":null,"options":"CREATEDB CREATEROLE","username":"user-no-databases","verifier":""} 
{"databases":null,"options":"","username":"user-with-verifier","verifier":"some$verifier"} +{"databases":null,"options":"LOGIN","username":"user-invalid-options","verifier":""} \. `)) return nil } - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, []v1beta1.PostgresUserSpec{ { Name: "user-no-options", @@ -133,11 +135,15 @@ COMMIT;`)) }, { Name: "user-no-databases", - Options: "some options here", + Options: "createdb createrole", }, { Name: "user-with-verifier", }, + { + Name: "user-invalid-options", + Options: "login password 'doot' --", + }, }, map[string]string{ "no-user": "ignored", @@ -149,14 +155,15 @@ COMMIT;`)) t.Run("PostgresSuperuser", func(t *testing.T) { calls := 0 + cluster := new(v1beta1.PostgresCluster) exec := func( _ context.Context, stdin io.Reader, _, _ io.Writer, command ...string, ) error { calls++ - b, err := ioutil.ReadAll(stdin) + b, err := io.ReadAll(stdin) assert.NilError(t, err) - assert.Assert(t, contains(string(b), ` + assert.Assert(t, cmp.Contains(string(b), ` \copy input (data) from stdin with (format text) {"databases":["postgres"],"options":"LOGIN SUPERUSER","username":"postgres","verifier":"allowed"} \. @@ -164,7 +171,7 @@ COMMIT;`)) return nil } - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, []v1beta1.PostgresUserSpec{ { Name: "postgres", @@ -179,3 +186,52 @@ COMMIT;`)) assert.Equal(t, calls, 1) }) } + +func TestWriteUsersSchemasInPostgreSQL(t *testing.T) { + ctx := context.Background() + + t.Run("Mixed users", func(t *testing.T) { + calls := 0 + exec := func( + _ context.Context, stdin io.Reader, _, _ io.Writer, command ...string, + ) error { + calls++ + + b, err := io.ReadAll(stdin) + assert.NilError(t, err) + + // The command strings will contain either of two possibilities, depending on the user called. + commands := strings.Join(command, ",") + re := regexp.MustCompile("--set=databases=\\[\"db1\"\\],--set=username=user-single-db|--set=databases=\\[\"db1\",\"db2\"\\],--set=username=user-multi-db") + assert.Assert(t, cmp.Regexp(re, commands)) + + assert.Assert(t, cmp.Contains(string(b), `CREATE SCHEMA IF NOT EXISTS :"username" AUTHORIZATION :"username";`)) + return nil + } + + assert.NilError(t, WriteUsersSchemasInPostgreSQL(ctx, exec, + []v1beta1.PostgresUserSpec{ + { + Name: "user-single-db", + Databases: []v1beta1.PostgresIdentifier{"db1"}, + }, + { + Name: "user-no-databases", + }, + { + Name: "user-multi-dbs", + Databases: []v1beta1.PostgresIdentifier{"db1", "db2"}, + }, + { + Name: "public", + Databases: []v1beta1.PostgresIdentifier{"db3"}, + }, + }, + )) + // The spec.users has four elements, but two will be skipped: + // * the user with the reserved name `public` + // * the user with 0 databases + assert.Equal(t, calls, 2) + }) + +} diff --git a/internal/postgres/wal.md b/internal/postgres/wal.md index d5c38f3e7d..afb094c20e 100644 --- a/internal/postgres/wal.md +++ b/internal/postgres/wal.md @@ -1,16 +1,7 @@ PostgreSQL commits transactions by storing changes in its [write-ahead log][WAL]. diff --git a/internal/registration/interface.go b/internal/registration/interface.go new file mode 100644 index 0000000000..578a064e2b --- /dev/null +++ b/internal/registration/interface.go @@ -0,0 +1,67 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package registration + +import ( + "fmt" + "os" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +type Registration interface { + // Required returns true when registration is required but the token is missing or invalid. + Required(record.EventRecorder, client.Object, *[]metav1.Condition) bool +} + +var URL = os.Getenv("REGISTRATION_URL") + +func SetAdvanceWarning(recorder record.EventRecorder, object client.Object, conditions *[]metav1.Condition) { + recorder.Eventf(object, corev1.EventTypeWarning, "Register Soon", + "Crunchy Postgres for Kubernetes requires registration for upgrades."+ + " Register now to be ready for your next upgrade. See %s for details.", URL) + + meta.SetStatusCondition(conditions, metav1.Condition{ + Type: v1beta1.Registered, + Status: metav1.ConditionFalse, + Reason: "TokenRequired", + Message: fmt.Sprintf( + "Crunchy Postgres for Kubernetes requires registration for upgrades."+ + " Register now to be ready for your next upgrade. See %s for details.", URL), + ObservedGeneration: object.GetGeneration(), + }) +} + +func SetRequiredWarning(recorder record.EventRecorder, object client.Object, conditions *[]metav1.Condition) { + recorder.Eventf(object, corev1.EventTypeWarning, "Registration Required", + "Crunchy Postgres for Kubernetes requires registration for upgrades."+ + " Register now to be ready for your next upgrade. See %s for details.", URL) + + meta.SetStatusCondition(conditions, metav1.Condition{ + Type: v1beta1.Registered, + Status: metav1.ConditionFalse, + Reason: "TokenRequired", + Message: fmt.Sprintf( + "Crunchy Postgres for Kubernetes requires registration for upgrades."+ + " Upgrade suspended. See %s for details.", URL), + ObservedGeneration: object.GetGeneration(), + }) +} + +func emitFailedWarning(recorder record.EventRecorder, object client.Object) { + recorder.Eventf(object, corev1.EventTypeWarning, "Token Authentication Failed", + "See %s for details.", URL) +} + +func emitVerifiedEvent(recorder record.EventRecorder, object client.Object) { + recorder.Event(object, corev1.EventTypeNormal, "Token Verified", + "Thank you for registering your installation of Crunchy Postgres for Kubernetes.") +} diff --git a/internal/registration/runner.go b/internal/registration/runner.go new file mode 100644 index 0000000000..0d607e1e94 --- /dev/null +++ b/internal/registration/runner.go @@ -0,0 +1,191 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package registration + +import ( + "context" + "crypto/rsa" + "errors" + "os" + "strings" + "sync" + "time" + + "github.com/golang-jwt/jwt/v5" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// Runner implements [Registration] by loading and validating the token at a +// fixed path. Its methods are safe to call concurrently. 
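The Runner defined below is both the token validator and a controller-runtime Runnable. A minimal wiring sketch, assuming a manager created elsewhere and made-up sources for the public key and token path (this patch only shows REGISTRATION_URL being read from the environment):

```go
package main

import (
	"log"
	"os"

	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/crunchydata/postgres-operator/internal/registration"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		log.Fatal(err)
	}

	// The key and path sources below are illustrative, not from this patch.
	runner, err := registration.NewRunner(
		os.Getenv("RSA_KEY"),          // PEM or bare base64 public key
		"/etc/pgo/registration-token", // hypothetical mount path for the token
		func() { /* e.g. nudge controllers to reconcile again */ },
	)
	if err != nil {
		log.Fatal(err)
	}

	// Load the token once up front, then let the manager run the periodic
	// re-check: Start ticks every minute and calls the callback on changes.
	if _, err := runner.CheckToken(); err != nil {
		log.Println("registration token not valid:", err)
	}
	if err := mgr.Add(runner); err != nil {
		log.Fatal(err)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		log.Fatal(err)
	}
}
```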
+type Runner struct { + changed func() + enabled bool + publicKey *rsa.PublicKey + refresh time.Duration + tokenPath string + + token struct { + sync.RWMutex + Exists bool `json:"-"` + + jwt.RegisteredClaims + Iteration int `json:"itr"` + } +} + +// Runner implements [Registration] and [manager.Runnable]. +var _ Registration = (*Runner)(nil) +var _ manager.Runnable = (*Runner)(nil) + +// NewRunner creates a [Runner] that periodically checks the validity of the +// token at tokenPath. It calls changed when the validity of the token changes. +func NewRunner(publicKey, tokenPath string, changed func()) (*Runner, error) { + runner := &Runner{ + changed: changed, + refresh: time.Minute, + tokenPath: tokenPath, + } + + var err error + switch { + case publicKey != "" && tokenPath != "": + if !strings.HasPrefix(strings.TrimSpace(publicKey), "-") { + publicKey = "-----BEGIN -----\n" + publicKey + "\n-----END -----" + } + + runner.enabled = true + runner.publicKey, err = jwt.ParseRSAPublicKeyFromPEM([]byte(publicKey)) + + case publicKey == "" && tokenPath != "": + err = errors.New("registration: missing public key") + + case publicKey != "" && tokenPath == "": + err = errors.New("registration: missing token path") + } + + return runner, err +} + +// CheckToken loads and verifies the configured token, returning an error when +// the file exists but cannot be verified, and +// returning the token if it can be verified. +// NOTE(upgradecheck): return the token/nil so that we can use the token +// in upgradecheck; currently a refresh of the token will cause a restart of the pod +// meaning that the token used in upgradecheck is always the current token. +// But if the restart behavior changes, we might drop the token return in main.go +// and change upgradecheck to retrieve the token itself +func (r *Runner) CheckToken() (*jwt.Token, error) { + data, errFile := os.ReadFile(r.tokenPath) + key := func(*jwt.Token) (any, error) { return r.publicKey, nil } + + // Assume [jwt] and [os] functions could do something unexpected; use defer + // to safely write to the token. + r.token.Lock() + defer r.token.Unlock() + + token, errToken := jwt.ParseWithClaims(string(data), &r.token, key, + jwt.WithExpirationRequired(), + jwt.WithValidMethods([]string{"RS256"}), + ) + + // The error from [os.ReadFile] indicates whether a token file exists. + r.token.Exists = !os.IsNotExist(errFile) + + // Reset most claims if there is any problem loading, parsing, validating, or + // verifying the token file. + if errFile != nil || errToken != nil { + r.token.RegisteredClaims = jwt.RegisteredClaims{} + } + + switch { + case !r.enabled || !r.token.Exists: + return nil, nil + case errFile != nil: + return nil, errFile + default: + return token, errToken + } +} + +func (r *Runner) state() (failed, required bool) { + // Assume [time] functions could do something unexpected; use defer to safely + // read the token. + r.token.RLock() + defer r.token.RUnlock() + + failed = r.token.Exists && r.token.ExpiresAt == nil + required = r.enabled && + (!r.token.Exists || failed || r.token.ExpiresAt.Before(time.Now())) + return +} + +// Required returns true when registration is required but the token is missing or invalid. 
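// In practice (see the runner tests later in this patch): with registration enabled and a
// missing or expired token, Required reports true and leaves the conditions slice untouched;
// when a token exists but failed verification, it additionally emits a "Token Authentication
// Failed" warning event; once a valid token is in place it reports false, removes the
// Registered, RegistrationRequired, and TokenRequired conditions, and, when registration is
// enabled, records a "Token Verified" event if any of those conditions were removed.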
+func (r *Runner) Required( + recorder record.EventRecorder, object client.Object, conditions *[]metav1.Condition, +) bool { + failed, required := r.state() + + if r.enabled && failed { + emitFailedWarning(recorder, object) + } + + if !required && conditions != nil { + before := len(*conditions) + meta.RemoveStatusCondition(conditions, v1beta1.Registered) + meta.RemoveStatusCondition(conditions, "RegistrationRequired") + meta.RemoveStatusCondition(conditions, "TokenRequired") + found := len(*conditions) != before + + if r.enabled && found { + emitVerifiedEvent(recorder, object) + } + } + + return required +} + +// NeedLeaderElection returns true so that r runs only on the single +// [manager.Manager] that is elected leader in the Kubernetes namespace. +func (r *Runner) NeedLeaderElection() bool { return true } + +// Start watches for a mounted registration token when enabled. It blocks +// until ctx is cancelled. +func (r *Runner) Start(ctx context.Context) error { + var ticks <-chan time.Time + + if r.enabled { + ticker := time.NewTicker(r.refresh) + defer ticker.Stop() + ticks = ticker.C + } + + log := logging.FromContext(ctx).WithValues("controller", "registration") + + for { + select { + case <-ticks: + _, before := r.state() + if _, err := r.CheckToken(); err != nil { + log.Error(err, "Unable to validate token") + } + if _, after := r.state(); before != after && r.changed != nil { + r.changed() + } + case <-ctx.Done(): + // https://github.com/kubernetes-sigs/controller-runtime/issues/1927 + if errors.Is(ctx.Err(), context.Canceled) { + return nil + } + return ctx.Err() + } + } +} diff --git a/internal/registration/runner_test.go b/internal/registration/runner_test.go new file mode 100644 index 0000000000..8e75848986 --- /dev/null +++ b/internal/registration/runner_test.go @@ -0,0 +1,574 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package registration + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/golang-jwt/jwt/v5" + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/crunchydata/postgres-operator/internal/testing/events" +) + +func TestNewRunner(t *testing.T) { + t.Parallel() + + key, err := rsa.GenerateKey(rand.Reader, 2048) + assert.NilError(t, err) + + der, err := x509.MarshalPKIXPublicKey(&key.PublicKey) + assert.NilError(t, err) + + public := pem.EncodeToMemory(&pem.Block{Bytes: der}) + assert.Assert(t, len(public) != 0) + + t.Run("Disabled", func(t *testing.T) { + runner, err := NewRunner("", "", nil) + assert.NilError(t, err) + assert.Assert(t, runner != nil) + assert.Assert(t, !runner.enabled) + }) + + t.Run("ConfiguredCorrectly", func(t *testing.T) { + runner, err := NewRunner(string(public), "any", nil) + assert.NilError(t, err) + assert.Assert(t, runner != nil) + assert.Assert(t, runner.enabled) + + t.Run("ExtraLines", func(t *testing.T) { + input := "\n\n" + strings.ReplaceAll(string(public), "\n", "\n\n") + "\n\n" + + runner, err := NewRunner(input, "any", nil) + assert.NilError(t, err) + assert.Assert(t, runner != nil) + assert.Assert(t, runner.enabled) + }) + + t.Run("WithoutPEMBoundaries", func(t *testing.T) { + lines := strings.Split(strings.TrimSpace(string(public)), "\n") + lines = lines[1 : len(lines)-1] + + for _, input := range []string{ + strings.Join(lines, ""), // single line + strings.Join(lines, "\n"), // multi-line + "\n\n" + strings.Join(lines, "\n\n") + "\n\n", // extra lines + } { + runner, err := NewRunner(input, "any", nil) + assert.NilError(t, err) + assert.Assert(t, runner != nil) + assert.Assert(t, runner.enabled) + } + }) + }) + + t.Run("ConfiguredIncorrectly", func(t *testing.T) { + for _, tt := range []struct { + key, path, msg string + }{ + {msg: "public key", key: "", path: "any"}, + {msg: "token path", key: "bad", path: ""}, + {msg: "invalid key", key: "bad", path: "any"}, + {msg: "token path", key: string(public), path: ""}, + } { + _, err := NewRunner(tt.key, tt.path, nil) + assert.ErrorContains(t, err, tt.msg, "(key=%q, path=%q)", tt.key, tt.path) + } + }) +} + +func TestRunnerCheckToken(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + key, err := rsa.GenerateKey(rand.Reader, 2048) + assert.NilError(t, err) + + t.Run("SafeToCallDisabled", func(t *testing.T) { + r := Runner{enabled: false} + _, err := r.CheckToken() + assert.NilError(t, err) + }) + + t.Run("FileMissing", func(t *testing.T) { + r := Runner{enabled: true, tokenPath: filepath.Join(dir, "nope")} + _, err := r.CheckToken() + assert.NilError(t, err) + }) + + t.Run("FileUnreadable", func(t *testing.T) { + r := Runner{enabled: true, tokenPath: filepath.Join(dir, "nope")} + assert.NilError(t, os.WriteFile(r.tokenPath, nil, 0o200)) // Writeable + + _, err := r.CheckToken() + assert.ErrorContains(t, err, "permission") + assert.Assert(t, r.token.ExpiresAt == nil) + }) + + t.Run("FileEmpty", func(t *testing.T) { + r := Runner{enabled: true, tokenPath: filepath.Join(dir, "empty")} + assert.NilError(t, os.WriteFile(r.tokenPath, nil, 0o400)) // Readable + + _, err := r.CheckToken() + assert.ErrorContains(t, err, "malformed") + assert.Assert(t, r.token.ExpiresAt == nil) + }) + + t.Run("WrongAlgorithm", 
func(t *testing.T) { + r := Runner{ + enabled: true, + publicKey: &key.PublicKey, + tokenPath: filepath.Join(dir, "hs256"), + } + + // Maliciously treating an RSA public key as an HMAC secret. + // - https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ + public, err := x509.MarshalPKIXPublicKey(r.publicKey) + assert.NilError(t, err) + data, err := jwt.New(jwt.SigningMethodHS256).SignedString(public) + assert.NilError(t, err) + assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable + + _, err = r.CheckToken() + assert.Assert(t, err != nil, "HMAC algorithm should be rejected") + assert.Assert(t, r.token.ExpiresAt == nil) + }) + + t.Run("MissingExpiration", func(t *testing.T) { + r := Runner{ + enabled: true, + publicKey: &key.PublicKey, + tokenPath: filepath.Join(dir, "no-claims"), + } + + data, err := jwt.New(jwt.SigningMethodRS256).SignedString(key) + assert.NilError(t, err) + assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable + + _, err = r.CheckToken() + assert.ErrorContains(t, err, "exp claim is required") + assert.Assert(t, r.token.ExpiresAt == nil) + }) + + t.Run("ExpiredToken", func(t *testing.T) { + r := Runner{ + enabled: true, + publicKey: &key.PublicKey, + tokenPath: filepath.Join(dir, "expired"), + } + + data, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "exp": jwt.NewNumericDate(time.Date(2020, 1, 1, 1, 1, 1, 1, time.UTC)), + }).SignedString(key) + assert.NilError(t, err) + assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable + + _, err = r.CheckToken() + assert.ErrorContains(t, err, "is expired") + assert.Assert(t, r.token.ExpiresAt == nil) + }) + + t.Run("ValidToken", func(t *testing.T) { + r := Runner{ + enabled: true, + publicKey: &key.PublicKey, + tokenPath: filepath.Join(dir, "valid"), + } + + expiration := jwt.NewNumericDate(time.Now().Add(time.Hour)) + data, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "exp": expiration, + }).SignedString(key) + assert.NilError(t, err) + assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable + + token, err := r.CheckToken() + assert.NilError(t, err) + assert.Assert(t, r.token.ExpiresAt != nil) + assert.Assert(t, token.Valid) + exp, err := token.Claims.GetExpirationTime() + assert.NilError(t, err) + assert.Equal(t, exp.Time, expiration.Time) + }) +} + +func TestRunnerLeaderElectionRunnable(t *testing.T) { + var runner manager.LeaderElectionRunnable = &Runner{} + + assert.Assert(t, runner.NeedLeaderElection()) +} + +func TestRunnerRequiredConditions(t *testing.T) { + t.Parallel() + + t.Run("RegistrationDisabled", func(t *testing.T) { + r := Runner{enabled: false} + + for _, tt := range []struct { + before, after []metav1.Condition + }{ + { + before: []metav1.Condition{}, + after: []metav1.Condition{}, + }, + { + before: []metav1.Condition{{Type: "ExistingOther"}}, + after: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + after: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{ + {Type: "Registered"}, + {Type: "ExistingOther"}, + {Type: "RegistrationRequired"}, + }, + after: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "TokenRequired"}}, + after: []metav1.Condition{}, + }, + } { + for _, exists := range []bool{false, true} { + for _, expires := range []time.Time{ + time.Now().Add(time.Hour), + 
time.Now().Add(-time.Hour), + } { + r.token.Exists = exists + r.token.ExpiresAt = jwt.NewNumericDate(expires) + + conditions := append([]metav1.Condition{}, tt.before...) + discard := new(events.Recorder) + object := &corev1.ConfigMap{} + + result := r.Required(discard, object, &conditions) + + assert.Equal(t, result, false, "expected registration not required") + assert.DeepEqual(t, conditions, tt.after) + } + } + } + }) + + t.Run("RegistrationRequired", func(t *testing.T) { + r := Runner{enabled: true} + + for _, tt := range []struct { + exists bool + expires time.Time + before []metav1.Condition + }{ + { + exists: false, expires: time.Now().Add(time.Hour), + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + { + exists: false, expires: time.Now().Add(-time.Hour), + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + { + exists: true, expires: time.Now().Add(-time.Hour), + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + } { + r.token.Exists = tt.exists + r.token.ExpiresAt = jwt.NewNumericDate(tt.expires) + + conditions := append([]metav1.Condition{}, tt.before...) + discard := new(events.Recorder) + object := &corev1.ConfigMap{} + + result := r.Required(discard, object, &conditions) + + assert.Equal(t, result, true, "expected registration required") + assert.DeepEqual(t, conditions, tt.before) + } + }) + + t.Run("Registered", func(t *testing.T) { + r := Runner{} + r.token.Exists = true + r.token.ExpiresAt = jwt.NewNumericDate(time.Now().Add(time.Hour)) + + for _, tt := range []struct { + before, after []metav1.Condition + }{ + { + before: []metav1.Condition{}, + after: []metav1.Condition{}, + }, + { + before: []metav1.Condition{{Type: "ExistingOther"}}, + after: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + after: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{ + {Type: "Registered"}, + {Type: "ExistingOther"}, + {Type: "RegistrationRequired"}, + }, + after: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "TokenRequired"}}, + after: []metav1.Condition{}, + }, + } { + for _, enabled := range []bool{false, true} { + r.enabled = enabled + + conditions := append([]metav1.Condition{}, tt.before...) + discard := new(events.Recorder) + object := &corev1.ConfigMap{} + + result := r.Required(discard, object, &conditions) + + assert.Equal(t, result, false, "expected registration not required") + assert.DeepEqual(t, conditions, tt.after) + } + } + }) +} + +func TestRunnerRequiredEvents(t *testing.T) { + t.Parallel() + + t.Run("RegistrationDisabled", func(t *testing.T) { + r := Runner{enabled: false} + + for _, tt := range []struct { + before []metav1.Condition + }{ + { + before: []metav1.Condition{}, + }, + { + before: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + } { + for _, exists := range []bool{false, true} { + for _, expires := range []time.Time{ + time.Now().Add(time.Hour), + time.Now().Add(-time.Hour), + } { + r.token.Exists = exists + r.token.ExpiresAt = jwt.NewNumericDate(expires) + + conditions := append([]metav1.Condition{}, tt.before...) 
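+ // Regardless of token state, a disabled runner should neither require registration nor emit events.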
+ object := &corev1.ConfigMap{} + recorder := events.NewRecorder(t, scheme.Scheme) + + result := r.Required(recorder, object, &conditions) + + assert.Equal(t, result, false, "expected registration not required") + assert.Equal(t, len(recorder.Events), 0, "expected no events") + } + } + } + }) + + t.Run("RegistrationRequired", func(t *testing.T) { + r := Runner{enabled: true} + + t.Run("MissingToken", func(t *testing.T) { + r.token.Exists = false + + for _, tt := range []struct { + before []metav1.Condition + }{ + { + before: []metav1.Condition{}, + }, + { + before: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + } { + conditions := append([]metav1.Condition{}, tt.before...) + object := &corev1.ConfigMap{} + recorder := events.NewRecorder(t, scheme.Scheme) + + result := r.Required(recorder, object, &conditions) + + assert.Equal(t, result, true, "expected registration required") + assert.Equal(t, len(recorder.Events), 0, "expected no events") + } + }) + + t.Run("InvalidToken", func(t *testing.T) { + r.token.Exists = true + r.token.ExpiresAt = nil + + for _, tt := range []struct { + before []metav1.Condition + }{ + { + before: []metav1.Condition{}, + }, + { + before: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + } { + conditions := append([]metav1.Condition{}, tt.before...) + object := &corev1.ConfigMap{} + recorder := events.NewRecorder(t, scheme.Scheme) + + result := r.Required(recorder, object, &conditions) + + assert.Equal(t, result, true, "expected registration required") + assert.Equal(t, len(recorder.Events), 1, "expected one event") + assert.Equal(t, recorder.Events[0].Type, "Warning") + assert.Equal(t, recorder.Events[0].Reason, "Token Authentication Failed") + } + }) + }) + + t.Run("Registered", func(t *testing.T) { + r := Runner{} + r.token.Exists = true + r.token.ExpiresAt = jwt.NewNumericDate(time.Now().Add(time.Hour)) + + t.Run("AlwaysRegistered", func(t *testing.T) { + // No prior registration conditions + for _, tt := range []struct { + before []metav1.Condition + }{ + { + before: []metav1.Condition{}, + }, + { + before: []metav1.Condition{{Type: "ExistingOther"}}, + }, + } { + for _, enabled := range []bool{false, true} { + r.enabled = enabled + + conditions := append([]metav1.Condition{}, tt.before...) + object := &corev1.ConfigMap{} + recorder := events.NewRecorder(t, scheme.Scheme) + + result := r.Required(recorder, object, &conditions) + + assert.Equal(t, result, false, "expected registration not required") + assert.Equal(t, len(recorder.Events), 0, "expected no events") + } + } + }) + + t.Run("PreviouslyUnregistered", func(t *testing.T) { + r.enabled = true + + // One or more prior registration conditions + for _, tt := range []struct { + before []metav1.Condition + }{ + { + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{ + {Type: "Registered"}, + {Type: "ExistingOther"}, + {Type: "RegistrationRequired"}, + }, + }, + { + before: []metav1.Condition{{Type: "TokenRequired"}}, + }, + } { + conditions := append([]metav1.Condition{}, tt.before...) 
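+ // Clearing prior registration conditions should emit exactly one "Token Verified" event.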
+ object := &corev1.ConfigMap{} + recorder := events.NewRecorder(t, scheme.Scheme) + + result := r.Required(recorder, object, &conditions) + + assert.Equal(t, result, false, "expected registration not required") + assert.Equal(t, len(recorder.Events), 1, "expected one event") + assert.Equal(t, recorder.Events[0].Type, "Normal") + assert.Equal(t, recorder.Events[0].Reason, "Token Verified") + } + }) + }) +} + +func TestRunnerStart(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + key, err := rsa.GenerateKey(rand.Reader, 2048) + assert.NilError(t, err) + + token, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "exp": jwt.NewNumericDate(time.Now().Add(time.Hour)), + }).SignedString(key) + assert.NilError(t, err) + + t.Run("DisabledDoesNothing", func(t *testing.T) { + runner := &Runner{ + enabled: false, + refresh: time.Nanosecond, + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + + assert.ErrorIs(t, runner.Start(ctx), context.DeadlineExceeded, + "expected it to block until context is canceled") + }) + + t.Run("WithCallback", func(t *testing.T) { + called := false + runner := &Runner{ + changed: func() { called = true }, + enabled: true, + publicKey: &key.PublicKey, + refresh: time.Second, + tokenPath: filepath.Join(dir, "token"), + } + + // Begin with an invalid token. + assert.NilError(t, os.WriteFile(runner.tokenPath, nil, 0o600)) + _, err = runner.CheckToken() + assert.Assert(t, err != nil) + + // Replace it with a valid token. + assert.NilError(t, os.WriteFile(runner.tokenPath, []byte(token), 0o600)) + + // Run with a timeout that exceeds the refresh interval. + ctx, cancel := context.WithTimeout(context.Background(), runner.refresh*3/2) + defer cancel() + + assert.ErrorIs(t, runner.Start(ctx), context.DeadlineExceeded) + assert.Assert(t, called, "expected a call back") + }) +} diff --git a/internal/registration/testing.go b/internal/registration/testing.go new file mode 100644 index 0000000000..1418f6d2d3 --- /dev/null +++ b/internal/registration/testing.go @@ -0,0 +1,21 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package registration + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// NOTE: This type can go away following https://go.dev/issue/47487. + +type RegistrationFunc func(record.EventRecorder, client.Object, *[]metav1.Condition) bool + +func (fn RegistrationFunc) Required(rec record.EventRecorder, obj client.Object, conds *[]metav1.Condition) bool { + return fn(rec, obj, conds) +} + +var _ Registration = RegistrationFunc(nil) diff --git a/internal/testing/cmp/cmp.go b/internal/testing/cmp/cmp.go new file mode 100644 index 0000000000..265a598064 --- /dev/null +++ b/internal/testing/cmp/cmp.go @@ -0,0 +1,67 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package cmp + +import ( + "strings" + + gocmp "github.com/google/go-cmp/cmp" + gotest "gotest.tools/v3/assert/cmp" + "sigs.k8s.io/yaml" +) + +type Comparison = gotest.Comparison + +// Contains succeeds if item is in collection. The collection may be a string, +// map, slice, or array. See [gotest.tools/v3/assert/cmp.Contains]. When either +// item or collection is a multi-line string, the failure message contains a +// multi-line report of the differences. 
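+//
+// For example (illustrative):
+//
+//	assert.Assert(t, cmp.Contains("line one\nline two\n", "line two"))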
+func Contains(collection, item any) Comparison { + cString, cStringOK := collection.(string) + iString, iStringOK := item.(string) + + if cStringOK && iStringOK { + if strings.Contains(cString, "\n") || strings.Contains(iString, "\n") { + return func() gotest.Result { + if strings.Contains(cString, iString) { + return gotest.ResultSuccess + } + return gotest.ResultFailureTemplate(` +--- {{ with callArg 0 }}{{ formatNode . }}{{else}}←{{end}} string does not contain ++++ {{ with callArg 1 }}{{ formatNode . }}{{else}}→{{end}} substring +{{ .Data.diff }}`, + map[string]any{ + "diff": gocmp.Diff(collection, item), + }) + } + } + } + + return gotest.Contains(collection, item) +} + +// DeepEqual compares two values using [github.com/google/go-cmp/cmp] and +// succeeds if the values are equal. The comparison can be customized using +// comparison Options. See [github.com/google/go-cmp/cmp.Option] constructors +// and [github.com/google/go-cmp/cmp/cmpopts]. +func DeepEqual(x, y any, opts ...gocmp.Option) Comparison { + return gotest.DeepEqual(x, y, opts...) +} + +// MarshalMatches converts actual to YAML and compares that to expected. +func MarshalMatches(actual any, expected string) Comparison { + b, err := yaml.Marshal(actual) + if err != nil { + return func() gotest.Result { return gotest.ResultFromError(err) } + } + return gotest.DeepEqual(string(b), strings.Trim(expected, "\t\n")+"\n") +} + +// Regexp succeeds if value contains any match of the regular expression re. +// The regular expression may be a *regexp.Regexp or a string that is a valid +// regexp pattern. +func Regexp(re any, value string) Comparison { + return gotest.Regexp(re, value) +} diff --git a/internal/testing/events/recorder.go b/internal/testing/events/recorder.go new file mode 100644 index 0000000000..23c03a4c40 --- /dev/null +++ b/internal/testing/events/recorder.go @@ -0,0 +1,99 @@ +// Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "fmt" + "testing" + "time" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + eventsv1 "k8s.io/api/events/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/tools/record/util" + "k8s.io/client-go/tools/reference" +) + +// Recorder implements the interface for the deprecated v1.Event API. +// The zero value discards events. +// - https://pkg.go.dev/k8s.io/client-go@v0.24.1/tools/record#EventRecorder +type Recorder struct { + Events []eventsv1.Event + + // eventf signature is intended to match the recorder for the events/v1 API. + // - https://pkg.go.dev/k8s.io/client-go@v0.24.1/tools/events#EventRecorder + eventf func(regarding, related runtime.Object, eventtype, reason, action, note string, args ...any) +} + +// NewRecorder returns an EventRecorder for the deprecated v1.Event API. +func NewRecorder(t testing.TB, scheme *runtime.Scheme) *Recorder { + t.Helper() + + var recorder Recorder + + // Construct an events/v1.Event and store it. This is a copy of the upstream + // implementation except that t.Error is called rather than klog. + // - https://releases.k8s.io/v1.24.1/staging/src/k8s.io/client-go/tools/events/event_recorder.go#L43-L92 + recorder.eventf = func(regarding, related runtime.Object, eventtype, reason, action, note string, args ...any) { + t.Helper() + + timestamp := metav1.MicroTime{Time: time.Now()} + message := fmt.Sprintf(note, args...) 
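+ // Build references to the regarding and related objects the same way the upstream recorder does.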
+ + refRegarding, err := reference.GetReference(scheme, regarding) + assert.Check(t, err, "Could not construct reference to: '%#v'", regarding) + + var refRelated *corev1.ObjectReference + if related != nil { + refRelated, err = reference.GetReference(scheme, related) + assert.Check(t, err, "Could not construct reference to: '%#v'", related) + } + + assert.Check(t, util.ValidateEventType(eventtype), "Unsupported event type: '%v'", eventtype) + + namespace := refRegarding.Namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + + recorder.Events = append(recorder.Events, eventsv1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", refRegarding.Name, timestamp.UnixNano()), + Namespace: namespace, + }, + EventTime: timestamp, + Series: nil, + ReportingController: t.Name(), + ReportingInstance: t.Name() + "-{hostname}", + Action: action, + Reason: reason, + Regarding: *refRegarding, + Related: refRelated, + Note: message, + Type: eventtype, + }) + } + + return &recorder +} + +var _ record.EventRecorder = (*Recorder)(nil) + +func (*Recorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...any) { + panic("DEPRECATED: do not use AnnotatedEventf") +} +func (r *Recorder) Event(object runtime.Object, eventtype, reason, message string) { + if r.eventf != nil { + r.eventf(object, nil, eventtype, reason, "", message) + } +} +func (r *Recorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...any) { + if r.eventf != nil { + r.eventf(object, nil, eventtype, reason, "", messageFmt, args...) + } +} diff --git a/internal/testing/require/exec.go b/internal/testing/require/exec.go new file mode 100644 index 0000000000..c182e84996 --- /dev/null +++ b/internal/testing/require/exec.go @@ -0,0 +1,68 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package require + +import ( + "os/exec" + "sync" + "testing" + + "gotest.tools/v3/assert" +) + +// Flake8 returns the path to the "flake8" executable or calls t.Skip. +func Flake8(t testing.TB) string { t.Helper(); return flake8(t) } + +var flake8 = executable("flake8", "--version") + +// OpenSSL returns the path to the "openssl" executable or calls t.Skip. +func OpenSSL(t testing.TB) string { t.Helper(); return openssl(t) } + +var openssl = executable("openssl", "version", "-a") + +// ShellCheck returns the path to the "shellcheck" executable or calls t.Skip. +func ShellCheck(t testing.TB) string { t.Helper(); return shellcheck(t) } + +var shellcheck = executable("shellcheck", "--version") + +// executable builds a function that returns the full path to name. +// The function (1) locates name or calls t.Skip, (2) runs that with args, +// (3) calls t.Log with the output, and (4) calls t.Fatal if it exits non-zero. +func executable(name string, args ...string) func(testing.TB) string { + var result func(testing.TB) string + var once sync.Once + + return func(t testing.TB) string { + t.Helper() + once.Do(func() { + path, err := exec.LookPath(name) + cmd := exec.Command(path, args...) // #nosec G204 -- args from init() + + if err != nil { + result = func(t testing.TB) string { + t.Helper() + t.Skipf("requires %q executable", name) + return "" + } + } else if info, err := cmd.CombinedOutput(); err != nil { + result = func(t testing.TB) string { + t.Helper() + // Let the "assert" package inspect and format the error. + // Show what was executed and anything it printed as well. 
+ // This always calls t.Fatal because err is not nil here. + assert.NilError(t, err, "%q\n%s", cmd.Args, info) + return "" + } + } else { + result = func(t testing.TB) string { + t.Helper() + t.Logf("using %q\n%s", path, info) + return path + } + } + }) + return result(t) + } +} diff --git a/internal/testing/require/kubernetes.go b/internal/testing/require/kubernetes.go new file mode 100644 index 0000000000..df21bca058 --- /dev/null +++ b/internal/testing/require/kubernetes.go @@ -0,0 +1,167 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package require + +import ( + "context" + "os" + "path/filepath" + goruntime "runtime" + "strings" + "sync" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" +) + +// https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants +var envtestVarsSet = os.Getenv("KUBEBUILDER_ASSETS") != "" || + strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") + +// EnvTest returns an unstarted Environment with crds. It calls t.Skip when +// the "KUBEBUILDER_ASSETS" and "USE_EXISTING_CLUSTER" environment variables +// are unset. +func EnvTest(t testing.TB, crds envtest.CRDInstallOptions) *envtest.Environment { + t.Helper() + + if !envtestVarsSet { + t.SkipNow() + } + + return &envtest.Environment{ + CRDInstallOptions: crds, + Scheme: crds.Scheme, + } +} + +var kubernetes struct { + sync.Mutex + + // Count references to the started Environment. + count int + env *envtest.Environment +} + +// Kubernetes starts or connects to a Kubernetes API and returns a client that uses it. +// When starting a local API, the client is a member of the "system:masters" group. +// +// It calls t.Fatal when something fails. It stops the local API using t.Cleanup. +// It calls t.Skip when the "KUBEBUILDER_ASSETS" and "USE_EXISTING_CLUSTER" environment +// variables are unset. +// +// Tests that call t.Parallel might share the same local API. Call t.Parallel after this +// function to ensure they share. +func Kubernetes(t testing.TB) client.Client { + t.Helper() + _, cc := kubernetes3(t) + return cc +} + +// Kubernetes2 is the same as [Kubernetes] but also returns a copy of the client +// configuration. +func Kubernetes2(t testing.TB) (*rest.Config, client.Client) { + t.Helper() + env, cc := kubernetes3(t) + return rest.CopyConfig(env.Config), cc +} + +func kubernetes3(t testing.TB) (*envtest.Environment, client.Client) { + t.Helper() + + if !envtestVarsSet { + t.SkipNow() + } + + frames := func() *goruntime.Frames { + var pcs [5]uintptr + n := goruntime.Callers(2, pcs[:]) + return goruntime.CallersFrames(pcs[0:n]) + }() + + // Calculate the project directory as reported by [goruntime.CallersFrames]. + frame, ok := frames.Next() + self := frame.File + root := strings.TrimSuffix(self, + filepath.Join("internal", "testing", "require", "kubernetes.go")) + + // Find the first caller that is not in this file. + for ok && frame.File == self { + frame, ok = frames.Next() + } + caller := frame.File + + // Calculate the project directory path relative to the caller. 
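+ // For example, a caller under internal/testing/validation would get base "../../..".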
+ base, err := filepath.Rel(filepath.Dir(caller), root) + assert.NilError(t, err) + + kubernetes.Lock() + defer kubernetes.Unlock() + + if kubernetes.env == nil { + env := EnvTest(t, envtest.CRDInstallOptions{ + ErrorIfPathMissing: true, + Paths: []string{ + filepath.Join(base, "config", "crd", "bases"), + filepath.Join(base, "hack", "tools", "external-snapshotter", "client", "config", "crd"), + }, + Scheme: runtime.Scheme, + }) + + _, err := env.Start() + assert.NilError(t, err) + + kubernetes.env = env + } + + kubernetes.count++ + + t.Cleanup(func() { + kubernetes.Lock() + defer kubernetes.Unlock() + + kubernetes.count-- + + if kubernetes.count == 0 { + assert.Check(t, kubernetes.env.Stop()) + kubernetes.env = nil + } + }) + + cc, err := client.New(kubernetes.env.Config, client.Options{ + Scheme: kubernetes.env.Scheme, + }) + assert.NilError(t, err) + + return kubernetes.env, cc +} + +// Namespace creates a random namespace that is deleted by t.Cleanup. It calls +// t.Fatal when creation fails. The caller may delete the namespace at any time. +func Namespace(t testing.TB, cc client.Client) *corev1.Namespace { + t.Helper() + + // Remove / that shows up when running a sub-test + // TestSomeThing/test_some_specific_thing + name, _, _ := strings.Cut(t.Name(), "/") + + ns := &corev1.Namespace{} + ns.GenerateName = "postgres-operator-test-" + ns.Labels = map[string]string{"postgres-operator-test": name} + + ctx := context.Background() + assert.NilError(t, cc.Create(ctx, ns)) + + t.Cleanup(func() { + assert.Check(t, client.IgnoreNotFound(cc.Delete(ctx, ns))) + }) + + return ns +} diff --git a/internal/testing/require/parallel.go b/internal/testing/require/parallel.go new file mode 100644 index 0000000000..4fbdf42284 --- /dev/null +++ b/internal/testing/require/parallel.go @@ -0,0 +1,26 @@ +// Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package require + +import ( + "sync" + "testing" +) + +var capacity sync.Mutex + +// ParallelCapacity calls t.Parallel then waits for needed capacity. There is +// no wait when needed is zero. +func ParallelCapacity(t *testing.T, needed int) { + t.Helper() + t.Parallel() + + if needed > 0 { + // Assume capacity of one; allow only one caller at a time. + // TODO: actually track how much capacity is available. 
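+ // Unlocking in t.Cleanup releases the capacity only after the test and its parallel subtests finish.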
+ capacity.Lock() + t.Cleanup(capacity.Unlock) + } +} diff --git a/internal/testing/token_invalid b/internal/testing/token_invalid new file mode 100644 index 0000000000..1e4622430a --- /dev/null +++ b/internal/testing/token_invalid @@ -0,0 +1 @@ +eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJDUEsiLCJzdWIiOiJwb2ludC5vZi5jb250YWN0QGNvbXBhbnkuY29tIiwiaXNzIjoiQ3J1bmNoeSBEYXRhIiwiZXhwIjoxNzI3NDUxOTM1LCJuYmYiOjE1MTYyMzkwMjIsImlhdCI6MTUxNjIzOTAyMn0.I2RBGvpHV4GKoWD5TaM89ToEFBhNdSYovyNlYp-PbEmSTTGLc_Wa3cKujahSYtlfwlZ6gSPKVE5U4IPAv7kzO8C74zoX-9_5GpHxGyBBDLL2XLglRmuTO_W5bheuFzrCq9A7HIi-kjKTk_DRvep1dhdooHqFzZQiAxxDa_U-zCkUAByo1cWd-Z2k51VZp1TUzAYSId6rDclIBc7QSi2HrMsdh3IeXZQs4dPhjemf09l6vVIT94sdqj774t6kTawUJhTdGVrZ_ad8ar3YxCpWGZzB3oSo62K7QEGWp9KCqTebP-LAF8glkpwi8H4HWiUcXo4bfANXPXe9Z0Oziau69Q+ diff --git a/internal/testing/token_rsa_key.pub b/internal/testing/token_rsa_key.pub new file mode 100644 index 0000000000..e548f1cef5 --- /dev/null +++ b/internal/testing/token_rsa_key.pub @@ -0,0 +1,9 @@ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu1SU1LfVLPHCozMxH2Mo +4lgOEePzNm0tRgeLezV6ffAt0gunVTLw7onLRnrq0/IzW7yWR7QkrmBL7jTKEn5u ++qKhbwKfBstIs+bMY2Zkp18gnTxKLxoS2tFczGkPLPgizskuemMghRniWaoLcyeh +kd3qqGElvW/VDL5AaWTg0nLVkjRo9z+40RQzuVaE8AkAFmxZzow3x+VJYKdjykkJ +0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg +cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc +mwIDAQAB +-----END PUBLIC KEY----- diff --git a/internal/testing/token_valid b/internal/testing/token_valid new file mode 100644 index 0000000000..6982d38829 --- /dev/null +++ b/internal/testing/token_valid @@ -0,0 +1 @@ +eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJDUEsiLCJzdWIiOiJwb2ludC5vZi5jb250YWN0QGNvbXBhbnkuY29tIiwiaXNzIjoiQ3J1bmNoeSBEYXRhIiwiZXhwIjoxNzI3NDUxOTM1LCJuYmYiOjE1MTYyMzkwMjIsImlhdCI6MTUxNjIzOTAyMn0.I2RBGvpHV4GKoWD5TaM89ToEFBhNdSYovyNlYp-PbEmSTTGLc_Wa3cKujahSYtlfwlZ6gSPKVE5U4IPAv7kzO8C74zoX-9_5GpHxGyBBDLL2XLglRmuTO_W5bheuFzrCq9A7HIi-kjKTk_DRvep1dhdooHqFzZQiAxxDa_U-zCkUAByo1cWd-Z2k51VZp1TUzAYSId6rDclIBc7QSi2HrMsdh3IeXZQs4dPhjemf09l6vVIT94sdqj774t6kTawUJhTdGVrZ_ad8ar3YxCpWGZzB3oSo62K7QEGWp9KCqTebP-LAF8glkpwi8H4HWiUcXo4bfANXPXe9Z0Oziau69Q diff --git a/internal/testing/validation/postgrescluster_test.go b/internal/testing/validation/postgrescluster_test.go new file mode 100644 index 0000000000..e71ff22b2e --- /dev/null +++ b/internal/testing/validation/postgrescluster_test.go @@ -0,0 +1,125 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package validation + +import ( + "context" + "fmt" + "testing" + + "gotest.tools/v3/assert" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestPostgresUserOptions(t *testing.T) { + ctx := context.Background() + cc := require.Kubernetes(t) + t.Parallel() + + namespace := require.Namespace(t, cc) + base := v1beta1.NewPostgresCluster() + + // Start with a bunch of required fields. 
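+ // The dry-run create below confirms this minimal spec passes the CRD schema; each subtest works on a DeepCopy of it.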
+ assert.NilError(t, yaml.Unmarshal([]byte(`{ + postgresVersion: 16, + backups: { + pgbackrest: { + repos: [{ name: repo1 }], + }, + }, + instances: [{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }], + }`), &base.Spec)) + + base.Namespace = namespace.Name + base.Name = "postgres-user-options" + + assert.NilError(t, cc.Create(ctx, base.DeepCopy(), client.DryRunAll), + "expected this base cluster to be valid") + + // See [internal/controller/postgrescluster.TestValidatePostgresUsers] + + t.Run("NoComments", func(t *testing.T) { + cluster := base.DeepCopy() + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "dashes", Options: "ANY -- comment"}, + {Name: "block-open", Options: "/* asdf"}, + {Name: "block-close", Options: " qw */ rt"}, + } + + err := cc.Create(ctx, cluster, client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "cannot contain comments") + + //nolint:errorlint // This is a test, and a panic is unlikely. + status := err.(apierrors.APIStatus).Status() + assert.Assert(t, status.Details != nil) + assert.Equal(t, len(status.Details.Causes), 3) + + for i, cause := range status.Details.Causes { + assert.Equal(t, cause.Field, fmt.Sprintf("spec.users[%d].options", i)) + assert.Assert(t, cmp.Contains(cause.Message, "cannot contain comments")) + } + }) + + t.Run("NoPassword", func(t *testing.T) { + cluster := base.DeepCopy() + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "uppercase", Options: "SUPERUSER PASSWORD ''"}, + {Name: "lowercase", Options: "password 'asdf'"}, + } + + err := cc.Create(ctx, cluster, client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "cannot assign password") + + //nolint:errorlint // This is a test, and a panic is unlikely. + status := err.(apierrors.APIStatus).Status() + assert.Assert(t, status.Details != nil) + assert.Equal(t, len(status.Details.Causes), 2) + + for i, cause := range status.Details.Causes { + assert.Equal(t, cause.Field, fmt.Sprintf("spec.users[%d].options", i)) + assert.Assert(t, cmp.Contains(cause.Message, "cannot assign password")) + } + }) + + t.Run("NoTerminators", func(t *testing.T) { + cluster := base.DeepCopy() + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "semicolon", Options: "some ;where"}, + } + + err := cc.Create(ctx, cluster, client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "should match") + + //nolint:errorlint // This is a test, and a panic is unlikely. + status := err.(apierrors.APIStatus).Status() + assert.Assert(t, status.Details != nil) + assert.Equal(t, len(status.Details.Causes), 1) + assert.Equal(t, status.Details.Causes[0].Field, "spec.users[0].options") + }) + + t.Run("Valid", func(t *testing.T) { + cluster := base.DeepCopy() + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "normal", Options: "CREATEDB valid until '2006-01-02'"}, + {Name: "very-full", Options: "NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 5"}, + } + + assert.NilError(t, cc.Create(ctx, cluster, client.DryRunAll)) + }) +} diff --git a/internal/upgradecheck/header.go b/internal/upgradecheck/header.go new file mode 100644 index 0000000000..a1d56ef442 --- /dev/null +++ b/internal/upgradecheck/header.go @@ -0,0 +1,219 @@ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package upgradecheck + +import ( + "context" + "encoding/json" + "net/http" + "os" + + googleuuid "github.com/google/uuid" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +const ( + clientHeader = "X-Crunchy-Client-Metadata" +) + +var ( + // Using apimachinery's UUID package, so our deployment UUID will be a string + deploymentID string +) + +// Extensible struct for client upgrade data +type clientUpgradeData struct { + BridgeClustersTotal int `json:"bridge_clusters_total"` + BuildSource string `json:"build_source"` + DeploymentID string `json:"deployment_id"` + FeatureGatesEnabled string `json:"feature_gates_enabled"` + IsOpenShift bool `json:"is_open_shift"` + KubernetesEnv string `json:"kubernetes_env"` + PGOClustersTotal int `json:"pgo_clusters_total"` + PGOInstaller string `json:"pgo_installer"` + PGOInstallerOrigin string `json:"pgo_installer_origin"` + PGOVersion string `json:"pgo_version"` + RegistrationToken string `json:"registration_token"` +} + +// generateHeader aggregates data and returns a struct of that data +// If any errors are encountered, it logs those errors and uses the default values +func generateHeader(ctx context.Context, cfg *rest.Config, crClient crclient.Client, + pgoVersion string, isOpenShift bool, registrationToken string) *clientUpgradeData { + + return &clientUpgradeData{ + BridgeClustersTotal: getBridgeClusters(ctx, crClient), + BuildSource: os.Getenv("BUILD_SOURCE"), + DeploymentID: ensureDeploymentID(ctx, crClient), + FeatureGatesEnabled: feature.ShowGates(ctx), + IsOpenShift: isOpenShift, + KubernetesEnv: getServerVersion(ctx, cfg), + PGOClustersTotal: getManagedClusters(ctx, crClient), + PGOInstaller: os.Getenv("PGO_INSTALLER"), + PGOInstallerOrigin: os.Getenv("PGO_INSTALLER_ORIGIN"), + PGOVersion: pgoVersion, + RegistrationToken: registrationToken, + } +} + +// ensureDeploymentID checks if the UUID exists in memory or in a ConfigMap +// If no UUID exists, ensureDeploymentID creates one and saves it in memory/as a ConfigMap +// Any errors encountered will be logged and the ID result will be what is in memory +func ensureDeploymentID(ctx context.Context, crClient crclient.Client) string { + // If there is no deploymentID in memory, generate one for possible use + if deploymentID == "" { + deploymentID = string(uuid.NewUUID()) + } + + cm := manageUpgradeCheckConfigMap(ctx, crClient, deploymentID) + + if cm != nil && cm.Data["deployment_id"] != "" { + deploymentID = cm.Data["deployment_id"] + } + + return deploymentID +} + +// manageUpgradeCheckConfigMap ensures a ConfigMap exists with a UUID +// If it doesn't exist, this creates it with the in-memory ID +// If it exists and it has a valid UUID, use that to replace the in-memory ID +// If it exists but the field is blank or mangled, we update the ConfigMap with the in-memory ID +func manageUpgradeCheckConfigMap(ctx context.Context, crClient crclient.Client, + currentID string) *corev1.ConfigMap { + + log := 
logging.FromContext(ctx) + upgradeCheckConfigMapMetadata := naming.UpgradeCheckConfigMap() + + cm := &corev1.ConfigMap{ + ObjectMeta: upgradeCheckConfigMapMetadata, + Data: map[string]string{"deployment_id": currentID}, + } + cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + + // If no namespace is set, then log this and skip trying to set the UUID in the ConfigMap + if upgradeCheckConfigMapMetadata.GetNamespace() == "" { + log.V(1).Info("upgrade check issue: namespace not set") + return cm + } + + retrievedCM := &corev1.ConfigMap{} + err := crClient.Get(ctx, naming.AsObjectKey(upgradeCheckConfigMapMetadata), retrievedCM) + + // If we get any error besides IsNotFound, log it, skip any ConfigMap steps, + // and use the in-memory deploymentID + if err != nil && !apierrors.IsNotFound(err) { + log.V(1).Info("upgrade check issue: error retrieving configmap", + "response", err.Error()) + return cm + } + + // If we get a ConfigMap with a "deployment_id", check if that UUID is valid + if retrievedCM.Data["deployment_id"] != "" { + _, parseErr := googleuuid.Parse(retrievedCM.Data["deployment_id"]) + // No error -- the ConfigMap has a valid deploymentID, so use that + if parseErr == nil { + cm.Data["deployment_id"] = retrievedCM.Data["deployment_id"] + } + } + + err = applyConfigMap(ctx, crClient, cm, postgrescluster.ControllerName) + if err != nil { + log.V(1).Info("upgrade check issue: could not apply configmap", + "response", err.Error()) + } + return cm +} + +// applyConfigMap is a focused version of the Reconciler.apply method, +// meant only to work with this ConfigMap +// It sends an apply patch to the Kubernetes API, with the fieldManager set to the deployment_id +// and the force parameter set to true. +// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers +// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts +func applyConfigMap(ctx context.Context, crClient crclient.Client, + object crclient.Object, owner string) error { + // Generate an apply-patch by comparing the object to its zero value. + zero := &corev1.ConfigMap{} + data, err := crclient.MergeFrom(zero).Data(object) + + if err == nil { + apply := crclient.RawPatch(crclient.Apply.Type(), data) + err = crClient.Patch(ctx, object, apply, + []crclient.PatchOption{crclient.ForceOwnership, crclient.FieldOwner(owner)}...) 
+ } + return err +} + +// getManagedClusters returns a count of postgres clusters managed by this PGO instance +// Any errors encountered will be logged and the count result will be 0 +func getManagedClusters(ctx context.Context, crClient crclient.Client) int { + var count int + clusters := &v1beta1.PostgresClusterList{} + err := crClient.List(ctx, clusters) + if err != nil { + log := logging.FromContext(ctx) + log.V(1).Info("upgrade check issue: could not count postgres clusters", + "response", err.Error()) + } else { + count = len(clusters.Items) + } + return count +} + +// getBridgeClusters returns a count of Bridge clusters managed by this PGO instance +// Any errors encountered will be logged and the count result will be 0 +func getBridgeClusters(ctx context.Context, crClient crclient.Client) int { + var count int + clusters := &v1beta1.CrunchyBridgeClusterList{} + err := crClient.List(ctx, clusters) + if err != nil { + log := logging.FromContext(ctx) + log.V(1).Info("upgrade check issue: could not count bridge clusters", + "response", err.Error()) + } else { + count = len(clusters.Items) + } + return count +} + +// getServerVersion returns the stringified server version (i.e., the same info `kubectl version` +// returns for the server) +// Any errors encountered will be logged and will return an empty string +func getServerVersion(ctx context.Context, cfg *rest.Config) string { + log := logging.FromContext(ctx) + discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) + if err != nil { + log.V(1).Info("upgrade check issue: could not retrieve discovery client", + "response", err.Error()) + return "" + } + versionInfo, err := discoveryClient.ServerVersion() + if err != nil { + log.V(1).Info("upgrade check issue: could not retrieve server version", + "response", err.Error()) + return "" + } + return versionInfo.String() +} + +func addHeader(req *http.Request, upgradeInfo *clientUpgradeData) (*http.Request, error) { + marshaled, err := json.Marshal(upgradeInfo) + if err == nil { + upgradeInfoString := string(marshaled) + req.Header.Add(clientHeader, upgradeInfoString) + } + return req, err +} diff --git a/internal/upgradecheck/header_test.go b/internal/upgradecheck/header_test.go new file mode 100644 index 0000000000..c144e7629b --- /dev/null +++ b/internal/upgradecheck/header_test.go @@ -0,0 +1,611 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package upgradecheck + +import ( + "context" + "encoding/json" + "net/http" + "strings" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/client-go/discovery" + + // Google Kubernetes Engine / Google Cloud Platform authentication provider + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "k8s.io/client-go/rest" + + "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestGenerateHeader(t *testing.T) { + setupDeploymentID(t) + ctx := context.Background() + cfg, cc := require.Kubernetes2(t) + setupNamespace(t, cc) + + dc, err := discovery.NewDiscoveryClientForConfig(cfg) + assert.NilError(t, err) + server, err := dc.ServerVersion() + assert.NilError(t, err) + + reconciler := postgrescluster.Reconciler{Client: cc} + + t.Setenv("PGO_INSTALLER", "test") + t.Setenv("PGO_INSTALLER_ORIGIN", "test-origin") + t.Setenv("BUILD_SOURCE", "developer") + + t.Run("error ensuring ID", func(t *testing.T) { + fakeClientWithOptionalError := &fakeClientWithError{ + cc, "patch error", + } + ctx, calls := setupLogCapture(ctx) + + res := generateHeader(ctx, cfg, fakeClientWithOptionalError, + "1.2.3", reconciler.IsOpenShift, "") + assert.Equal(t, len(*calls), 1) + assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not apply configmap`)) + assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) + assert.Equal(t, deploymentID, res.DeploymentID) + pgoList := v1beta1.PostgresClusterList{} + err := cc.List(ctx, &pgoList) + assert.NilError(t, err) + assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) + bridgeList := v1beta1.CrunchyBridgeClusterList{} + err = cc.List(ctx, &bridgeList) + assert.NilError(t, err) + assert.Equal(t, len(bridgeList.Items), res.BridgeClustersTotal) + assert.Equal(t, "1.2.3", res.PGOVersion) + assert.Equal(t, server.String(), res.KubernetesEnv) + assert.Equal(t, "test", res.PGOInstaller) + assert.Equal(t, "test-origin", res.PGOInstallerOrigin) + assert.Equal(t, "developer", res.BuildSource) + }) + + t.Run("error getting cluster count", func(t *testing.T) { + fakeClientWithOptionalError := &fakeClientWithError{ + cc, "list error", + } + ctx, calls := setupLogCapture(ctx) + + res := generateHeader(ctx, cfg, fakeClientWithOptionalError, + "1.2.3", reconciler.IsOpenShift, "") + assert.Equal(t, len(*calls), 2) + // Aggregating the logs since we cannot determine which call will be first + callsAggregate := strings.Join(*calls, " ") + assert.Assert(t, cmp.Contains(callsAggregate, `upgrade check issue: could not count postgres clusters`)) + assert.Assert(t, cmp.Contains(callsAggregate, `upgrade check issue: could not count bridge clusters`)) + assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) + assert.Equal(t, deploymentID, res.DeploymentID) + assert.Equal(t, 0, res.PGOClustersTotal) + assert.Equal(t, 0, res.BridgeClustersTotal) + assert.Equal(t, "1.2.3", res.PGOVersion) + assert.Equal(t, server.String(), res.KubernetesEnv) + assert.Equal(t, "test", res.PGOInstaller) + assert.Equal(t, "test-origin", res.PGOInstallerOrigin) + assert.Equal(t, "developer", res.BuildSource) 
+ }) + + t.Run("error getting server version info", func(t *testing.T) { + ctx, calls := setupLogCapture(ctx) + badcfg := &rest.Config{} + + res := generateHeader(ctx, badcfg, cc, + "1.2.3", reconciler.IsOpenShift, "") + assert.Equal(t, len(*calls), 1) + assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not retrieve server version`)) + assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) + assert.Equal(t, deploymentID, res.DeploymentID) + pgoList := v1beta1.PostgresClusterList{} + err := cc.List(ctx, &pgoList) + assert.NilError(t, err) + assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) + assert.Equal(t, "1.2.3", res.PGOVersion) + assert.Equal(t, "", res.KubernetesEnv) + assert.Equal(t, "test", res.PGOInstaller) + assert.Equal(t, "test-origin", res.PGOInstallerOrigin) + assert.Equal(t, "developer", res.BuildSource) + }) + + t.Run("success", func(t *testing.T) { + ctx, calls := setupLogCapture(ctx) + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx = feature.NewContext(ctx, gate) + + res := generateHeader(ctx, cfg, cc, + "1.2.3", reconciler.IsOpenShift, "") + assert.Equal(t, len(*calls), 0) + assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) + assert.Equal(t, deploymentID, res.DeploymentID) + pgoList := v1beta1.PostgresClusterList{} + err := cc.List(ctx, &pgoList) + assert.NilError(t, err) + assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) + assert.Equal(t, "1.2.3", res.PGOVersion) + assert.Equal(t, server.String(), res.KubernetesEnv) + assert.Equal(t, "TablespaceVolumes=true", res.FeatureGatesEnabled) + assert.Equal(t, "test", res.PGOInstaller) + assert.Equal(t, "test-origin", res.PGOInstallerOrigin) + assert.Equal(t, "developer", res.BuildSource) + }) +} + +func TestEnsureID(t *testing.T) { + ctx := context.Background() + cc := require.Kubernetes(t) + setupNamespace(t, cc) + + t.Run("success, no id set in mem or configmap", func(t *testing.T) { + deploymentID = "" + oldID := deploymentID + ctx, calls := setupLogCapture(ctx) + + newID := ensureDeploymentID(ctx, cc) + assert.Equal(t, len(*calls), 0) + assert.Assert(t, newID != oldID) + assert.Assert(t, newID == deploymentID) + + cm := &corev1.ConfigMap{} + err := cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cm) + assert.NilError(t, err) + assert.Equal(t, newID, cm.Data["deployment_id"]) + err = cc.Delete(ctx, cm) + assert.NilError(t, err) + }) + + t.Run("success, id set in mem, configmap created", func(t *testing.T) { + oldID := setupDeploymentID(t) + + cm := &corev1.ConfigMap{} + err := cc.Get(ctx, naming.AsObjectKey( + naming.UpgradeCheckConfigMap()), cm) + assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) + ctx, calls := setupLogCapture(ctx) + + newID := ensureDeploymentID(ctx, cc) + assert.Equal(t, len(*calls), 0) + assert.Assert(t, newID == oldID) + assert.Assert(t, newID == deploymentID) + + err = cc.Get(ctx, naming.AsObjectKey( + naming.UpgradeCheckConfigMap()), cm) + assert.NilError(t, err) + assert.Assert(t, deploymentID == cm.Data["deployment_id"]) + + err = cc.Delete(ctx, cm) + assert.NilError(t, err) + }) + + t.Run("success, id set in configmap, mem overwritten", func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: naming.UpgradeCheckConfigMap(), + Data: map[string]string{ + "deployment_id": string(uuid.NewUUID()), + }, + } + err := cc.Create(ctx, cm) + assert.NilError(t, err) + + cmRetrieved := &corev1.ConfigMap{} + err = cc.Get(ctx, naming.AsObjectKey( + 
naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.NilError(t, err) + + oldID := setupDeploymentID(t) + ctx, calls := setupLogCapture(ctx) + newID := ensureDeploymentID(ctx, cc) + assert.Equal(t, len(*calls), 0) + assert.Assert(t, newID != oldID) + assert.Assert(t, newID == deploymentID) + assert.Assert(t, deploymentID == cmRetrieved.Data["deployment_id"]) + + err = cc.Delete(ctx, cm) + assert.NilError(t, err) + }) + + t.Run("configmap failed, no namespace given", func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: naming.UpgradeCheckConfigMap(), + Data: map[string]string{ + "deployment_id": string(uuid.NewUUID()), + }, + } + err := cc.Create(ctx, cm) + assert.NilError(t, err) + + cmRetrieved := &corev1.ConfigMap{} + err = cc.Get(ctx, naming.AsObjectKey( + naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.NilError(t, err) + + oldID := setupDeploymentID(t) + ctx, calls := setupLogCapture(ctx) + t.Setenv("PGO_NAMESPACE", "") + + newID := ensureDeploymentID(ctx, cc) + assert.Equal(t, len(*calls), 1) + assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: namespace not set`)) + assert.Assert(t, newID == oldID) + assert.Assert(t, newID == deploymentID) + assert.Assert(t, deploymentID != cmRetrieved.Data["deployment_id"]) + err = cc.Delete(ctx, cm) + assert.NilError(t, err) + }) + + t.Run("configmap failed with not NotFound error, using preexisting ID", func(t *testing.T) { + fakeClientWithOptionalError := &fakeClientWithError{ + cc, "get error", + } + oldID := setupDeploymentID(t) + ctx, calls := setupLogCapture(ctx) + + newID := ensureDeploymentID(ctx, fakeClientWithOptionalError) + assert.Equal(t, len(*calls), 1) + assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: error retrieving configmap`)) + assert.Assert(t, newID == oldID) + assert.Assert(t, newID == deploymentID) + + cmRetrieved := &corev1.ConfigMap{} + err := cc.Get(ctx, naming.AsObjectKey( + naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) + }) + + t.Run("configmap failed to create, using preexisting ID", func(t *testing.T) { + fakeClientWithOptionalError := &fakeClientWithError{ + cc, "patch error", + } + oldID := setupDeploymentID(t) + + ctx, calls := setupLogCapture(ctx) + newID := ensureDeploymentID(ctx, fakeClientWithOptionalError) + assert.Equal(t, len(*calls), 1) + assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not apply configmap`)) + assert.Assert(t, newID == oldID) + assert.Assert(t, newID == deploymentID) + }) +} + +func TestManageUpgradeCheckConfigMap(t *testing.T) { + ctx := context.Background() + cc := require.Kubernetes(t) + setupNamespace(t, cc) + + t.Run("no namespace given", func(t *testing.T) { + ctx, calls := setupLogCapture(ctx) + t.Setenv("PGO_NAMESPACE", "") + + returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id") + assert.Equal(t, len(*calls), 1) + assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: namespace not set`)) + assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") + }) + + t.Run("configmap not found, created", func(t *testing.T) { + cmRetrieved := &corev1.ConfigMap{} + err := cc.Get(ctx, naming.AsObjectKey( + naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) + + ctx, calls := setupLogCapture(ctx) + returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id") + + assert.Equal(t, len(*calls), 0) + assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") + err 
= cc.Delete(ctx, returnedCM) + assert.NilError(t, err) + }) + + t.Run("configmap failed with not NotFound error", func(t *testing.T) { + fakeClientWithOptionalError := &fakeClientWithError{ + cc, "get error", + } + ctx, calls := setupLogCapture(ctx) + + returnedCM := manageUpgradeCheckConfigMap(ctx, fakeClientWithOptionalError, + "current-id") + assert.Equal(t, len(*calls), 1) + assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: error retrieving configmap`)) + assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") + }) + + t.Run("no deployment id in configmap", func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: naming.UpgradeCheckConfigMap(), + Data: map[string]string{ + "wrong_field": string(uuid.NewUUID()), + }, + } + err := cc.Create(ctx, cm) + assert.NilError(t, err) + + cmRetrieved := &corev1.ConfigMap{} + err = cc.Get(ctx, naming.AsObjectKey( + naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.NilError(t, err) + + ctx, calls := setupLogCapture(ctx) + returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id") + assert.Equal(t, len(*calls), 0) + assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") + err = cc.Delete(ctx, cm) + assert.NilError(t, err) + }) + + t.Run("mangled deployment id", func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: naming.UpgradeCheckConfigMap(), + Data: map[string]string{ + "deploymentid": string(uuid.NewUUID())[1:], + }, + } + err := cc.Create(ctx, cm) + assert.NilError(t, err) + + cmRetrieved := &corev1.ConfigMap{} + err = cc.Get(ctx, naming.AsObjectKey( + naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.NilError(t, err) + + ctx, calls := setupLogCapture(ctx) + returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id") + assert.Equal(t, len(*calls), 0) + assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") + err = cc.Delete(ctx, cm) + assert.NilError(t, err) + }) + + t.Run("good configmap with good id", func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: naming.UpgradeCheckConfigMap(), + Data: map[string]string{ + "deployment_id": string(uuid.NewUUID()), + }, + } + err := cc.Create(ctx, cm) + assert.NilError(t, err) + + cmRetrieved := &corev1.ConfigMap{} + err = cc.Get(ctx, naming.AsObjectKey( + naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.NilError(t, err) + + ctx, calls := setupLogCapture(ctx) + returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id") + assert.Equal(t, len(*calls), 0) + assert.Assert(t, returnedCM.Data["deployment-id"] != "current-id") + err = cc.Delete(ctx, cm) + assert.NilError(t, err) + }) + + t.Run("configmap failed to create", func(t *testing.T) { + fakeClientWithOptionalError := &fakeClientWithError{ + cc, "patch error", + } + + ctx, calls := setupLogCapture(ctx) + returnedCM := manageUpgradeCheckConfigMap(ctx, fakeClientWithOptionalError, + "current-id") + assert.Equal(t, len(*calls), 1) + assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not apply configmap`)) + assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") + }) +} + +func TestApplyConfigMap(t *testing.T) { + ctx := context.Background() + cc := require.Kubernetes(t) + setupNamespace(t, cc) + + t.Run("successful create", func(t *testing.T) { + cmRetrieved := &corev1.ConfigMap{} + err := cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) + + cm := &corev1.ConfigMap{ + ObjectMeta: naming.UpgradeCheckConfigMap(), + Data: 
map[string]string{ + "new_field": "new_value", + }, + } + cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + err = applyConfigMap(ctx, cc, cm, "test") + assert.NilError(t, err) + cmRetrieved = &corev1.ConfigMap{} + err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.NilError(t, err) + assert.Equal(t, cm.Data["new_value"], cmRetrieved.Data["new_value"]) + err = cc.Delete(ctx, cm) + assert.NilError(t, err) + }) + + t.Run("successful update", func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: naming.UpgradeCheckConfigMap(), + Data: map[string]string{ + "new_field": "old_value", + }, + } + cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + err := cc.Create(ctx, cm) + assert.NilError(t, err) + cmRetrieved := &corev1.ConfigMap{} + err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.NilError(t, err) + + cm2 := &corev1.ConfigMap{ + ObjectMeta: naming.UpgradeCheckConfigMap(), + Data: map[string]string{ + "new_field": "new_value", + }, + } + cm2.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + err = applyConfigMap(ctx, cc, cm2, "test") + assert.NilError(t, err) + cmRetrieved = &corev1.ConfigMap{} + err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.NilError(t, err) + assert.Equal(t, cm.Data["new_value"], cmRetrieved.Data["new_value"]) + err = cc.Delete(ctx, cm) + assert.NilError(t, err) + }) + + t.Run("successful nothing changed", func(t *testing.T) { + cm := &corev1.ConfigMap{ + ObjectMeta: naming.UpgradeCheckConfigMap(), + Data: map[string]string{ + "new_field": "new_value", + }, + } + cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + err := cc.Create(ctx, cm) + assert.NilError(t, err) + cmRetrieved := &corev1.ConfigMap{} + err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.NilError(t, err) + + cm2 := &corev1.ConfigMap{ + ObjectMeta: naming.UpgradeCheckConfigMap(), + Data: map[string]string{ + "new_field": "new_value", + }, + } + cm2.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + err = applyConfigMap(ctx, cc, cm2, "test") + assert.NilError(t, err) + cmRetrieved = &corev1.ConfigMap{} + err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.NilError(t, err) + assert.Equal(t, cm.Data["new_value"], cmRetrieved.Data["new_value"]) + err = cc.Delete(ctx, cm) + assert.NilError(t, err) + }) + + t.Run("failure", func(t *testing.T) { + cmRetrieved := &corev1.ConfigMap{} + err := cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved) + assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) + + cm := &corev1.ConfigMap{ + ObjectMeta: naming.UpgradeCheckConfigMap(), + Data: map[string]string{ + "new_field": "new_value", + }, + } + cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + fakeClientWithOptionalError := &fakeClientWithError{ + cc, "patch error", + } + + err = applyConfigMap(ctx, fakeClientWithOptionalError, cm, "test") + assert.Error(t, err, "patch error") + }) +} + +func TestGetManagedClusters(t *testing.T) { + ctx := context.Background() + + t.Run("success", func(t *testing.T) { + fakeClient := setupFakeClientWithPGOScheme(t, true) + ctx, calls := setupLogCapture(ctx) + count := getManagedClusters(ctx, fakeClient) + assert.Equal(t, len(*calls), 0) + assert.Assert(t, count == 2) + }) + + t.Run("list throw error", func(t 
*testing.T) { + fakeClientWithOptionalError := &fakeClientWithError{ + setupFakeClientWithPGOScheme(t, true), "list error", + } + ctx, calls := setupLogCapture(ctx) + count := getManagedClusters(ctx, fakeClientWithOptionalError) + assert.Assert(t, len(*calls) > 0) + assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not count postgres clusters`)) + assert.Assert(t, count == 0) + }) +} + +func TestGetBridgeClusters(t *testing.T) { + ctx := context.Background() + + t.Run("success", func(t *testing.T) { + fakeClient := setupFakeClientWithPGOScheme(t, true) + ctx, calls := setupLogCapture(ctx) + count := getBridgeClusters(ctx, fakeClient) + assert.Equal(t, len(*calls), 0) + assert.Assert(t, count == 2) + }) + + t.Run("list throw error", func(t *testing.T) { + fakeClientWithOptionalError := &fakeClientWithError{ + setupFakeClientWithPGOScheme(t, true), "list error", + } + ctx, calls := setupLogCapture(ctx) + count := getBridgeClusters(ctx, fakeClientWithOptionalError) + assert.Assert(t, len(*calls) > 0) + assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not count bridge clusters`)) + assert.Assert(t, count == 0) + }) +} + +func TestGetServerVersion(t *testing.T) { + t.Run("success", func(t *testing.T) { + expect, server := setupVersionServer(t, true) + ctx, calls := setupLogCapture(context.Background()) + + got := getServerVersion(ctx, &rest.Config{ + Host: server.URL, + }) + assert.Equal(t, len(*calls), 0) + assert.Equal(t, got, expect.String()) + }) + + t.Run("failure", func(t *testing.T) { + _, server := setupVersionServer(t, false) + ctx, calls := setupLogCapture(context.Background()) + + got := getServerVersion(ctx, &rest.Config{ + Host: server.URL, + }) + assert.Equal(t, len(*calls), 1) + assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not retrieve server version`)) + assert.Equal(t, got, "") + }) +} + +func TestAddHeader(t *testing.T) { + t.Run("successful", func(t *testing.T) { + req := &http.Request{ + Header: http.Header{}, + } + versionString := "1.2.3" + upgradeInfo := &clientUpgradeData{ + PGOVersion: versionString, + } + + result, err := addHeader(req, upgradeInfo) + assert.NilError(t, err) + header := result.Header[clientHeader] + + passedThroughData := &clientUpgradeData{} + err = json.Unmarshal([]byte(header[0]), passedThroughData) + assert.NilError(t, err) + + assert.Equal(t, passedThroughData.PGOVersion, "1.2.3") + // Failure to list clusters results in 0 returned + assert.Equal(t, passedThroughData.PGOClustersTotal, 0) + }) +} diff --git a/internal/upgradecheck/helpers_test.go b/internal/upgradecheck/helpers_test.go new file mode 100644 index 0000000000..63184184db --- /dev/null +++ b/internal/upgradecheck/helpers_test.go @@ -0,0 +1,179 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package upgradecheck + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/go-logr/logr/funcr" + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apimachinery/pkg/version" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// fakeClientWithError is a controller runtime client and an error type to force +type fakeClientWithError struct { + crclient.Client + errorType string +} + +// Get returns the client.get OR an Error (`get error`) if the fakeClientWithError is set to error that way +func (f *fakeClientWithError) Get(ctx context.Context, key types.NamespacedName, obj crclient.Object, opts ...crclient.GetOption) error { + switch f.errorType { + case "get error": + return fmt.Errorf("get error") + default: + return f.Client.Get(ctx, key, obj, opts...) + } +} + +// Patch returns the client.get OR an Error (`patch error`) if the fakeClientWithError is set to error that way +// TODO: PatchType is not supported currently by fake +// - https://github.com/kubernetes/client-go/issues/970 +// Once that gets fixed, we can test without envtest +func (f *fakeClientWithError) Patch(ctx context.Context, obj crclient.Object, + patch crclient.Patch, opts ...crclient.PatchOption) error { + switch { + case f.errorType == "patch error": + return fmt.Errorf("patch error") + default: + return f.Client.Patch(ctx, obj, patch, opts...) + } +} + +// List returns the client.get OR an Error (`list error`) if the fakeClientWithError is set to error that way +func (f *fakeClientWithError) List(ctx context.Context, objList crclient.ObjectList, + opts ...crclient.ListOption) error { + switch f.errorType { + case "list error": + return fmt.Errorf("list error") + default: + return f.Client.List(ctx, objList, opts...) + } +} + +// setupDeploymentID returns a UUID +func setupDeploymentID(t *testing.T) string { + t.Helper() + deploymentID = string(uuid.NewUUID()) + return deploymentID +} + +// setupFakeClientWithPGOScheme returns a fake client with the PGO scheme added; +// if `includeCluster` is true, also adds some empty PostgresCluster and CrunchyBridgeCluster +// items to the client +func setupFakeClientWithPGOScheme(t *testing.T, includeCluster bool) crclient.Client { + t.Helper() + if includeCluster { + pc := &v1beta1.PostgresClusterList{ + Items: []v1beta1.PostgresCluster{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hippo", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "elephant", + }, + }, + }, + } + + bcl := &v1beta1.CrunchyBridgeClusterList{ + Items: []v1beta1.CrunchyBridgeCluster{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hippo", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "elephant", + }, + }, + }, + } + + return fake.NewClientBuilder(). + WithScheme(runtime.Scheme). + WithLists(pc, bcl). 
+ Build() + } + return fake.NewClientBuilder().WithScheme(runtime.Scheme).Build() +} + +// setupVersionServer sets up and tears down a server and version info for testing +func setupVersionServer(t *testing.T, works bool) (version.Info, *httptest.Server) { + t.Helper() + expect := version.Info{ + Major: "1", + Minor: "22", + GitCommit: "v1.22.2", + } + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, + req *http.Request) { + if works { + output, _ := json.Marshal(expect) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + // We don't need to check the error output from this + _, _ = w.Write(output) + } else { + w.WriteHeader(http.StatusBadRequest) + } + })) + t.Cleanup(server.Close) + + return expect, server +} + +// setupLogCapture captures the logs and keeps count of the logs captured +func setupLogCapture(ctx context.Context) (context.Context, *[]string) { + calls := []string{} + testlog := funcr.NewJSON(func(object string) { + calls = append(calls, object) + }, funcr.Options{ + Verbosity: 1, + }) + return logging.NewContext(ctx, testlog), &calls +} + +// setupNamespace creates a namespace that will be deleted by t.Cleanup. +// For upgradechecking, this namespace is set to `postgres-operator`, +// which sometimes is created by other parts of the testing apparatus, +// cf., the createnamespace call in `make check-envtest-existing`. +// When creation fails, it calls t.Fatal. The caller may delete the namespace +// at any time. +func setupNamespace(t testing.TB, cc crclient.Client) { + t.Helper() + ns := &corev1.Namespace{} + ns.Name = "postgres-operator" + ns.Labels = map[string]string{"postgres-operator-test": t.Name()} + + ctx := context.Background() + exists := &corev1.Namespace{} + assert.NilError(t, crclient.IgnoreNotFound( + cc.Get(ctx, crclient.ObjectKeyFromObject(ns), exists))) + if exists.Name != "" { + return + } + assert.NilError(t, cc.Create(ctx, ns)) + t.Cleanup(func() { assert.Check(t, crclient.IgnoreNotFound(cc.Delete(ctx, ns))) }) +} diff --git a/internal/upgradecheck/http.go b/internal/upgradecheck/http.go new file mode 100644 index 0000000000..71a3c465c0 --- /dev/null +++ b/internal/upgradecheck/http.go @@ -0,0 +1,201 @@ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package upgradecheck + +import ( + "context" + "fmt" + "io" + "net/http" + "time" + + "github.com/golang-jwt/jwt/v5" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/crunchydata/postgres-operator/internal/logging" +) + +var ( + client HTTPClient + + // With these Backoff settings, wait.ExponentialBackoff will + // * use one second as the base time; + // * increase delays between calls by a power of 2 (1, 2, 4, etc.); + // * and retry four times. + // Note that there is no indeterminacy here since there is no Jitter set). + // With these parameters, the calls will occur at 0, 1, 3, and 7 seconds + // (i.e., at 1, 2, and 4 second delays for the retries). 
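+	//
+	// As a rough sketch of how these settings are consumed (the helper name
+	// below is illustrative, not part of this package):
+	//
+	//	_ = wait.ExponentialBackoff(backoff, func() (bool, error) {
+	//		done := attemptOnce() // hypothetical single request attempt
+	//		return done, nil      // returning a nil error keeps retrying
+	//	})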
+ backoff = wait.Backoff{ + Duration: 1 * time.Second, + Factor: float64(2), + Steps: 4, + } +) + +const ( + // upgradeCheckURL can be set using the CHECK_FOR_UPGRADES_URL env var + upgradeCheckURL = "https://operator-maestro.crunchydata.com/pgo-versions" +) + +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +// Creating an interface for cache with WaitForCacheSync to allow easier mocking +type CacheWithWait interface { + WaitForCacheSync(ctx context.Context) bool +} + +func init() { + // Since we create this client once during startup, + // we want each connection to be fresh, hence the non-default transport + // with DisableKeepAlives set to true + // See https://github.com/golang/go/issues/43905 and https://github.com/golang/go/issues/23427 + // for discussion of problems with long-lived connections + client = &http.Client{ + Timeout: 5 * time.Second, + Transport: &http.Transport{ + DisableKeepAlives: true, + }, + } +} + +func checkForUpgrades(ctx context.Context, url, versionString string, backoff wait.Backoff, + crclient crclient.Client, cfg *rest.Config, + isOpenShift bool, registrationToken string) (message string, header string, err error) { + var headerPayloadStruct *clientUpgradeData + + // Prep request + req, err := http.NewRequest("GET", url, nil) + if err == nil { + // generateHeader always returns some sort of struct, using defaults/nil values + // in case some of the checks return errors + headerPayloadStruct = generateHeader(ctx, cfg, crclient, + versionString, isOpenShift, registrationToken) + req, err = addHeader(req, headerPayloadStruct) + } + + // wait.ExponentialBackoff will retry the func according to the backoff object until + // (a) func returns done as true or + // (b) the backoff settings are exhausted, + // i.e., the process hits the cap for time or the number of steps + // The anonymous function here sets certain preexisting variables (bodyBytes, err, status) + // which are then used by the surrounding `checkForUpgrades` function as part of the return + var bodyBytes []byte + var status int + + if err == nil { + _ = wait.ExponentialBackoff( + backoff, + func() (done bool, backoffErr error) { + var res *http.Response + res, err = client.Do(req) + + if err == nil { + defer res.Body.Close() + status = res.StatusCode + + // This is a very basic check, ignoring nuances around + // certain StatusCodes that should either prevent or impact retries + if status == http.StatusOK { + bodyBytes, err = io.ReadAll(res.Body) + return true, nil + } + } + + // Return false, nil to continue checking + return false, nil + }) + } + + // We received responses, but none of them were 200 OK. + if err == nil && status != http.StatusOK { + err = fmt.Errorf("received StatusCode %d", status) + } + + // TODO: Parse response and log info for user on potential upgrades + return string(bodyBytes), req.Header.Get(clientHeader), err +} + +type CheckForUpgradesScheduler struct { + Client crclient.Client + Config *rest.Config + + OpenShift bool + Refresh time.Duration + RegistrationToken string + URL, Version string +} + +// ManagedScheduler creates a [CheckForUpgradesScheduler] and adds it to m. +// NOTE(registration): This takes a token/nil parameter when the operator is started. +// Currently the operator restarts when the token is updated, +// so this token is always current; but if that restart behavior is changed, +// we will want the upgrade mechanism to instantiate its own registration runner +// or otherwise get the most recent token. 
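+//
+// An illustrative call from operator startup code (the variable names here
+// are assumptions, not part of this package):
+//
+//	if err := upgradecheck.ManagedScheduler(mgr, isOpenShift, "", versionString, token); err != nil {
+//		return err
+//	}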
+func ManagedScheduler(m manager.Manager, openshift bool, + url, version string, registrationToken *jwt.Token) error { + if url == "" { + url = upgradeCheckURL + } + + var token string + if registrationToken != nil { + token = registrationToken.Raw + } + + return m.Add(&CheckForUpgradesScheduler{ + Client: m.GetClient(), + Config: m.GetConfig(), + OpenShift: openshift, + Refresh: 24 * time.Hour, + RegistrationToken: token, + URL: url, + Version: version, + }) +} + +// NeedLeaderElection returns true so that s runs only on the single +// [manager.Manager] that is elected leader in the Kubernetes cluster. +func (s *CheckForUpgradesScheduler) NeedLeaderElection() bool { return true } + +// Start checks for upgrades periodically. It blocks until ctx is cancelled. +func (s *CheckForUpgradesScheduler) Start(ctx context.Context) error { + s.check(ctx) + + ticker := time.NewTicker(s.Refresh) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + s.check(ctx) + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (s *CheckForUpgradesScheduler) check(ctx context.Context) { + log := logging.FromContext(ctx) + + defer func() { + if v := recover(); v != nil { + log.V(1).Info("encountered panic in upgrade check", "response", v) + } + }() + + info, header, err := checkForUpgrades(ctx, + s.URL, s.Version, backoff, s.Client, s.Config, s.OpenShift, s.RegistrationToken) + + if err != nil { + log.V(1).Info("could not complete upgrade check", "response", err.Error()) + } else { + log.Info(info, clientHeader, header) + } +} diff --git a/internal/upgradecheck/http_test.go b/internal/upgradecheck/http_test.go new file mode 100644 index 0000000000..9535f942ea --- /dev/null +++ b/internal/upgradecheck/http_test.go @@ -0,0 +1,236 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package upgradecheck + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + "testing" + "time" + + "github.com/go-logr/logr/funcr" + "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" +) + +func init() { + client = &MockClient{Timeout: 1} + // set backoff to two steps, 1 second apart for testing + backoff = wait.Backoff{ + Duration: 1 * time.Second, + Factor: float64(1), + Steps: 2, + } +} + +type MockClient struct { + Timeout time.Duration +} + +var funcFoo func() (*http.Response, error) + +// Do is the mock request that will return a mock success +func (m *MockClient) Do(req *http.Request) (*http.Response, error) { + return funcFoo() +} + +func TestCheckForUpgrades(t *testing.T) { + fakeClient := setupFakeClientWithPGOScheme(t, true) + cfg := &rest.Config{} + + ctx := logging.NewContext(context.Background(), logging.Discard()) + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx = feature.NewContext(ctx, gate) + + // Pass *testing.T to allows the correct messages from the assert package + // in the event of certain failures. 
+ checkData := func(t *testing.T, header string) { + data := clientUpgradeData{} + err := json.Unmarshal([]byte(header), &data) + assert.NilError(t, err) + assert.Assert(t, data.DeploymentID != "") + assert.Equal(t, data.PGOVersion, "4.7.3") + assert.Equal(t, data.RegistrationToken, "speakFriend") + assert.Equal(t, data.BridgeClustersTotal, 2) + assert.Equal(t, data.PGOClustersTotal, 2) + assert.Equal(t, data.FeatureGatesEnabled, "TablespaceVolumes=true") + } + + t.Run("success", func(t *testing.T) { + // A successful call + funcFoo = func() (*http.Response, error) { + json := `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}` + return &http.Response{ + Body: io.NopCloser(strings.NewReader(json)), + StatusCode: http.StatusOK, + }, nil + } + + res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, + fakeClient, cfg, false, "speakFriend") + assert.NilError(t, err) + assert.Equal(t, res, `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}`) + checkData(t, header) + }) + + t.Run("total failure, err sending", func(t *testing.T) { + var counter int + // A call returning errors + funcFoo = func() (*http.Response, error) { + counter++ + return &http.Response{}, errors.New("whoops") + } + + res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, + fakeClient, cfg, false, "speakFriend") + // Two failed calls because of env var + assert.Equal(t, counter, 2) + assert.Equal(t, res, "") + assert.Equal(t, err.Error(), `whoops`) + checkData(t, header) + }) + + t.Run("total failure, bad StatusCode", func(t *testing.T) { + var counter int + // A call returning bad StatusCode + funcFoo = func() (*http.Response, error) { + counter++ + return &http.Response{ + Body: io.NopCloser(strings.NewReader("")), + StatusCode: http.StatusBadRequest, + }, nil + } + + res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, + fakeClient, cfg, false, "speakFriend") + assert.Equal(t, res, "") + // Two failed calls because of env var + assert.Equal(t, counter, 2) + assert.Equal(t, err.Error(), `received StatusCode 400`) + checkData(t, header) + }) + + t.Run("one failure, then success", func(t *testing.T) { + var counter int + // A call returning bad StatusCode the first time + // and a successful response the second time + funcFoo = func() (*http.Response, error) { + if counter < 1 { + counter++ + return &http.Response{ + Body: io.NopCloser(strings.NewReader("")), + StatusCode: http.StatusBadRequest, + }, nil + } + counter++ + json := `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}` + return &http.Response{ + Body: io.NopCloser(strings.NewReader(json)), + StatusCode: http.StatusOK, + }, nil + } + + res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, + fakeClient, cfg, false, "speakFriend") + assert.Equal(t, counter, 2) + assert.NilError(t, err) + assert.Equal(t, res, `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}`) + checkData(t, header) + }) +} + +// TODO(benjaminjb): Replace `fake` with envtest +func TestCheckForUpgradesScheduler(t *testing.T) { + fakeClient := setupFakeClientWithPGOScheme(t, false) + _, server := setupVersionServer(t, true) + defer server.Close() + cfg := &rest.Config{Host: server.URL} + + t.Run("panic from checkForUpgrades doesn't bubble up", func(t *testing.T) { + ctx := context.Background() + + // capture logs + var calls []string + ctx = 
logging.NewContext(ctx, funcr.NewJSON(func(object string) { + calls = append(calls, object) + }, funcr.Options{ + Verbosity: 1, + })) + + // A panicking call + funcFoo = func() (*http.Response, error) { + panic(fmt.Errorf("oh no!")) + } + + s := CheckForUpgradesScheduler{ + Client: fakeClient, + Config: cfg, + } + s.check(ctx) + + assert.Equal(t, len(calls), 2) + assert.Assert(t, cmp.Contains(calls[1], `encountered panic in upgrade check`)) + }) + + t.Run("successful log each loop, ticker works", func(t *testing.T) { + ctx := context.Background() + + // capture logs + var calls []string + ctx = logging.NewContext(ctx, funcr.NewJSON(func(object string) { + calls = append(calls, object) + }, funcr.Options{ + Verbosity: 1, + })) + + // A successful call + funcFoo = func() (*http.Response, error) { + json := `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}` + return &http.Response{ + Body: io.NopCloser(strings.NewReader(json)), + StatusCode: http.StatusOK, + }, nil + } + + // Set loop time to 1s and sleep for 2s before sending the done signal + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + s := CheckForUpgradesScheduler{ + Client: fakeClient, + Config: cfg, + Refresh: 1 * time.Second, + } + assert.ErrorIs(t, context.DeadlineExceeded, s.Start(ctx)) + + // Sleeping leads to some non-deterministic results, but we expect at least 2 executions + // plus one log for the failure to apply the configmap + assert.Assert(t, len(calls) >= 4) + + assert.Assert(t, cmp.Contains(calls[1], `{\"pgo_versions\":[{\"tag\":\"v5.0.4\"},{\"tag\":\"v5.0.3\"},{\"tag\":\"v5.0.2\"},{\"tag\":\"v5.0.1\"},{\"tag\":\"v5.0.0\"}]}`)) + assert.Assert(t, cmp.Contains(calls[3], `{\"pgo_versions\":[{\"tag\":\"v5.0.4\"},{\"tag\":\"v5.0.3\"},{\"tag\":\"v5.0.2\"},{\"tag\":\"v5.0.1\"},{\"tag\":\"v5.0.0\"}]}`)) + }) +} + +func TestCheckForUpgradesSchedulerLeaderOnly(t *testing.T) { + // CheckForUpgradesScheduler should implement this interface. + var s manager.LeaderElectionRunnable = new(CheckForUpgradesScheduler) + + assert.Assert(t, s.NeedLeaderElection(), + "expected to only run on the leader") +} diff --git a/internal/util/secrets.go b/internal/util/secrets.go index af45e3de6f..82768c9386 100644 --- a/internal/util/secrets.go +++ b/internal/util/secrets.go @@ -1,92 +1,79 @@ -package util - -/* - Copyright 2017 - 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package util import ( "crypto/rand" + "io" "math/big" - "strconv" - "strings" ) -// The following constants are used as a part of password generation. For more -// information on these selections, please consulting the ASCII man page -// (`man ascii`) +// The following constant is used as a part of password generation. 
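+//
+// For illustration, a caller could combine this constant with one of the
+// generators defined below (the error handling shown is a sketch only):
+//
+//	password, err := GenerateASCIIPassword(DefaultGeneratedPasswordLength)
+//	if err != nil {
+//		return err // the crypto/rand source failed
+//	}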
const ( // DefaultGeneratedPasswordLength is the default length of what a generated // password should be if it's not set elsewhere DefaultGeneratedPasswordLength = 24 - - // passwordCharLower is the lowest ASCII character to use for generating a - // password, which is 40 - passwordCharLower = 40 - // passwordCharUpper is the highest ASCII character to use for generating a - // password, which is 126 - passwordCharUpper = 126 - // passwordCharExclude is a map of characters that we choose to exclude from - // the password to simplify usage in the shell. There is still enough entropy - // that exclusion of these characters is OK. - passwordCharExclude = "`\\" ) -// passwordCharSelector is a "big int" that we need to select the random ASCII -// character for the password. Since the random integer generator looks for -// values from [0,X), we need to force this to be [40,126] -var passwordCharSelector = big.NewInt(passwordCharUpper - passwordCharLower) +// accumulate gathers n bytes from f and returns them as a string. It returns +// an empty string when f returns an error. +func accumulate(n int, f func() (byte, error)) (string, error) { + result := make([]byte, n) -// GeneratePassword generates a password of a given length out of the acceptable -// ASCII characters suitable for a password -func GeneratePassword(length int) (string, error) { - password := make([]byte, length) - i := 0 - - for i < length { - val, err := rand.Int(rand.Reader, passwordCharSelector) - // if there is an error generating the random integer, return - if err != nil { + for i := range result { + if b, err := f(); err == nil { + result[i] = b + } else { return "", err } + } - char := byte(passwordCharLower + val.Int64()) + return string(result), nil +} - // if the character is in the exclusion list, continue - if idx := strings.IndexAny(string(char), passwordCharExclude); idx > -1 { - continue - } +// randomCharacter builds a function that returns random bytes from class. +func randomCharacter(random io.Reader, class string) func() (byte, error) { + if random == nil { + panic("requires a random source") + } + if len(class) == 0 { + panic("class cannot be empty") + } - password[i] = char - i++ + size := big.NewInt(int64(len(class))) + + return func() (byte, error) { + if i, err := rand.Int(random, size); err == nil { + return class[int(i.Int64())], nil + } else { + return 0, err + } } +} + +var randomAlphaNumeric = randomCharacter(rand.Reader, ``+ + `ABCDEFGHIJKLMNOPQRSTUVWXYZ`+ + `abcdefghijklmnopqrstuvwxyz`+ + `0123456789`) - return string(password), nil +// GenerateAlphaNumericPassword returns a random alphanumeric string. +func GenerateAlphaNumericPassword(length int) (string, error) { + return accumulate(length, randomAlphaNumeric) } -// GeneratedPasswordLength returns the value for what the length of a -// randomly generated password should be. It first determines if the user -// provided this value via a configuration file, and if not and/or the value is -// invalid, uses the default value -func GeneratedPasswordLength(configuredPasswordLength string) int { - // set the generated password length for random password generation - // note that "configuredPasswordLength" may be an empty string, and as such - // the below line could fail. That's ok though! 
as we have a default set up - generatedPasswordLength, err := strconv.Atoi(configuredPasswordLength) - // if there is an error...set it to a default - if err != nil { - generatedPasswordLength = DefaultGeneratedPasswordLength - } +// policyASCII is the list of acceptable characters from which to generate an +// ASCII password. +const policyASCII = `` + + `()*+,-./` + `:;<=>?@` + `[]^_` + `{|}` + + `ABCDEFGHIJKLMNOPQRSTUVWXYZ` + + `abcdefghijklmnopqrstuvwxyz` + + `0123456789` + +var randomASCII = randomCharacter(rand.Reader, policyASCII) - return generatedPasswordLength +// GenerateASCIIPassword returns a random string of printable ASCII characters. +func GenerateASCIIPassword(length int) (string, error) { + return accumulate(length, randomASCII) } diff --git a/internal/util/secrets_test.go b/internal/util/secrets_test.go index 029ce0808a..5d549ca89e 100644 --- a/internal/util/secrets_test.go +++ b/internal/util/secrets_test.go @@ -1,64 +1,140 @@ -package util - -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package util import ( + "errors" "strings" "testing" + "testing/iotest" "unicode" + + "gotest.tools/v3/assert" + "gotest.tools/v3/assert/cmp" + "k8s.io/apimachinery/pkg/util/sets" ) -func TestGeneratePassword(t *testing.T) { - // different lengths - for _, length := range []int{1, 2, 3, 5, 20, 200} { - password, err := GeneratePassword(length) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - if expected, actual := length, len(password); expected != actual { - t.Fatalf("expected length %v, got %v", expected, actual) - } - if i := strings.IndexFunc(password, func(r rune) bool { return !unicode.IsPrint(r) }); i > -1 { - t.Fatalf("expected only printable characters, got %q in %q", password[i], password) - } - if i := strings.IndexAny(password, passwordCharExclude); i > -1 { - t.Fatalf("expected no exclude characters, got %q in %q", password[i], password) - } - } +func TestAccumulate(t *testing.T) { + called := 0 + result, err := accumulate(10, func() (byte, error) { + called++ + return byte('A' + called), nil + }) + + assert.NilError(t, err) + assert.Equal(t, called, 10) + assert.Equal(t, result, "BCDEFGHIJK") - // random contents - previous := []string{} + t.Run("Error", func(t *testing.T) { + called := 0 + expected := errors.New("zap") + result, err := accumulate(10, func() (byte, error) { + called++ + if called < 5 { + return byte('A' + called), nil + } else { + return 'Z', expected + } + }) + + assert.Equal(t, err, expected) + assert.Equal(t, called, 5, "expected an early return") + assert.Equal(t, result, "") + }) +} + +func TestGenerateAlphaNumericPassword(t *testing.T) { + for _, length := range []int{0, 1, 2, 3, 5, 20, 200} { + password, err := GenerateAlphaNumericPassword(length) + + assert.NilError(t, err) + assert.Equal(t, length, len(password)) + assert.Assert(t, cmp.Regexp(`^[A-Za-z0-9]*$`, password)) + } + previous := 
sets.Set[string]{} for i := 0; i < 10; i++ { - password, err := GeneratePassword(5) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - if i := strings.IndexFunc(password, func(r rune) bool { return !unicode.IsPrint(r) }); i > -1 { - t.Fatalf("expected only printable characters, got %q in %q", password[i], password) - } - if i := strings.IndexAny(password, passwordCharExclude); i > -1 { - t.Fatalf("expected no exclude characters, got %q in %q", password[i], password) + password, err := GenerateAlphaNumericPassword(5) + + assert.NilError(t, err) + assert.Assert(t, cmp.Regexp(`^[A-Za-z0-9]{5}$`, password)) + + assert.Assert(t, !previous.Has(password), "%q generated twice", password) + previous.Insert(password) + } +} + +func TestGenerateASCIIPassword(t *testing.T) { + for _, length := range []int{0, 1, 2, 3, 5, 20, 200} { + password, err := GenerateASCIIPassword(length) + + assert.NilError(t, err) + assert.Equal(t, length, len(password)) + + // Check every rune in the string. See [TestPolicyASCII]. + for _, c := range password { + assert.Assert(t, strings.ContainsRune(policyASCII, c), "%q is not acceptable", c) } + } - for i := range previous { - if password == previous[i] { - t.Fatalf("expected passwords to not repeat, got %q after %q", password, previous) - } + previous := sets.Set[string]{} + for i := 0; i < 10; i++ { + password, err := GenerateASCIIPassword(5) + + assert.NilError(t, err) + assert.Equal(t, 5, len(password)) + + // Check every rune in the string. See [TestPolicyASCII]. + for _, c := range password { + assert.Assert(t, strings.ContainsRune(policyASCII, c), "%q is not acceptable", c) } - previous = append(previous, password) + + assert.Assert(t, !previous.Has(password), "%q generated twice", password) + previous.Insert(password) + } +} + +func TestPolicyASCII(t *testing.T) { + // [GenerateASCIIPassword] used to pick random characters by doing + // arithmetic on ASCII codepoints. It now uses a constant set of characters + // that satisfy the following properties. For more information on these + // selections, consult the ASCII man page, `man ascii`. + + // lower and upper are the lowest and highest ASCII characters to use. + const lower = 40 + const upper = 126 + + // exclude is a map of characters that we choose to exclude from + // the password to simplify usage in the shell. + const exclude = "`\\" + + count := map[rune]int{} + + // Check every rune in the string. + for _, c := range policyASCII { + assert.Assert(t, unicode.IsPrint(c), "%q is not printable", c) + assert.Assert(t, c <= unicode.MaxASCII, "%q is not ASCII", c) + assert.Assert(t, lower <= c && c < upper, "%q is outside the range", c) + assert.Assert(t, !strings.ContainsRune(exclude, c), "%q should be excluded", c) + + count[c]++ + assert.Assert(t, count[c] == 1, "%q occurs more than once", c) } + + // Every acceptable byte is in the policy. + assert.Equal(t, len(policyASCII), upper-lower-len(exclude)) +} + +func TestRandomCharacter(t *testing.T) { + // The random source cannot be nil and the character class cannot be empty. + assert.Assert(t, cmp.Panics(func() { randomCharacter(nil, "") })) + assert.Assert(t, cmp.Panics(func() { randomCharacter(nil, "asdf") })) + assert.Assert(t, cmp.Panics(func() { randomCharacter(iotest.ErrReader(nil), "") })) + + // The function returns any error from the random source. 
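+	// (iotest.ErrReader returns a Reader whose Read always fails with the
+	// supplied error, so the generator's first call to rand.Int fails.)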
+ expected := errors.New("doot") + _, err := randomCharacter(iotest.ErrReader(expected), "asdf")() + assert.Equal(t, err, expected) } diff --git a/internal/util/util.go b/internal/util/util.go index 2fae20a171..72634ebbc6 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -1,19 +1,8 @@ -package util - -/* - Copyright 2017 - 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package util import ( "strings" @@ -23,7 +12,7 @@ import ( // be used as part of an SQL statement. // // Any double quotes in name will be escaped. The quoted identifier will be -// case sensitive when used in a query. If the input string contains a zero +// case-sensitive when used in a query. If the input string contains a zero // byte, the result will be truncated immediately before it. // // Implementation borrowed from lib/pq: https://github.com/lib/pq which is diff --git a/licenses/LICENSE.txt b/licenses/LICENSE.txt index 2aa6942246..e799dc3209 100644 --- a/licenses/LICENSE.txt +++ b/licenses/LICENSE.txt @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2017 - 2018 Crunchy Data Solutions, Inc. + Copyright 2017 - 2024 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go new file mode 100644 index 0000000000..0b94a4dae1 --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go @@ -0,0 +1,239 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CrunchyBridgeClusterSpec defines the desired state of CrunchyBridgeCluster +// to be managed by Crunchy Data Bridge +type CrunchyBridgeClusterSpec struct { + // +optional + Metadata *Metadata `json:"metadata,omitempty"` + + // Whether the cluster is high availability, + // meaning that it has a secondary it can fail over to quickly + // in case the primary becomes unavailable. + // +kubebuilder:validation:Required + IsHA bool `json:"isHa"` + + // Whether the cluster is protected. Protected clusters can't be destroyed until + // their protected flag is removed + // +kubebuilder:validation:Optional + IsProtected bool `json:"isProtected,omitempty"` + + // The name of the cluster + // --- + // According to Bridge API/GUI errors, + // "Field name should be between 5 and 50 characters in length, containing only unicode characters, unicode numbers, hyphens, spaces, or underscores, and starting with a character", and ending with a character or number. 
+ // +kubebuilder:validation:MinLength=5 + // +kubebuilder:validation:MaxLength=50 + // +kubebuilder:validation:Pattern=`^[A-Za-z][A-Za-z0-9\-_ ]*[A-Za-z0-9]$` + // +kubebuilder:validation:Required + // +kubebuilder:validation:Type=string + ClusterName string `json:"clusterName"` + + // The ID of the cluster's plan. Determines instance, CPU, and memory. + // +kubebuilder:validation:Required + Plan string `json:"plan"` + + // The ID of the cluster's major Postgres version. + // Currently Bridge offers 13-17 + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=13 + // +kubebuilder:validation:Maximum=17 + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 + PostgresVersion int `json:"majorVersion"` + + // The cloud provider where the cluster is located. + // Currently Bridge offers aws, azure, and gcp only + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum={aws,azure,gcp} + // +kubebuilder:validation:XValidation:rule=`self == oldSelf`,message="immutable" + Provider string `json:"provider"` + + // The provider region where the cluster is located. + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule=`self == oldSelf`,message="immutable" + Region string `json:"region"` + + // Roles for which to create Secrets that contain their credentials which + // are retrieved from the Bridge API. An empty list creates no role secrets. + // Removing a role from this list does NOT drop the role nor revoke their + // access, but it will delete that role's secret from the kube cluster. + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + Roles []*CrunchyBridgeClusterRoleSpec `json:"roles,omitempty"` + + // The name of the secret containing the API key and team id + // +kubebuilder:validation:Required + Secret string `json:"secret"` + + // The amount of storage available to the cluster in gigabytes. + // The amount must be an integer, followed by Gi (gibibytes) or G (gigabytes) to match Kubernetes conventions. + // If the amount is given in Gi, we round to the nearest G value. + // The minimum value allowed by Bridge is 10 GB. + // The maximum value allowed by Bridge is 65535 GB. + // +kubebuilder:validation:Required + Storage resource.Quantity `json:"storage"` +} + +type CrunchyBridgeClusterRoleSpec struct { + // Name of the role within Crunchy Bridge. + // More info: https://docs.crunchybridge.com/concepts/users + // +kubebuilder:validation:Required + Name string `json:"name"` + + // The name of the Secret that will hold the role credentials. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Type=string + SecretName string `json:"secretName"` +} + +// CrunchyBridgeClusterStatus defines the observed state of CrunchyBridgeCluster +type CrunchyBridgeClusterStatus struct { + // The name of the cluster in Bridge. + // +optional + ClusterName string `json:"name,omitempty"` + + // conditions represent the observations of postgres cluster's current state. + // +optional + // +listType=map + // +listMapKey=type + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // The Hostname of the postgres cluster in Bridge, provided by Bridge API and null until then. 
+ // +optional + Host string `json:"host,omitempty"` + + // The ID of the postgres cluster in Bridge, provided by Bridge API and null until then. + // +optional + ID string `json:"id,omitempty"` + + // Whether the cluster is high availability, meaning that it has a secondary it can fail + // over to quickly in case the primary becomes unavailable. + // +optional + IsHA *bool `json:"isHa"` + + // Whether the cluster is protected. Protected clusters can't be destroyed until + // their protected flag is removed + // +optional + IsProtected *bool `json:"isProtected"` + + // The cluster's major Postgres version. + // +optional + MajorVersion int `json:"majorVersion"` + + // observedGeneration represents the .metadata.generation on which the status was based. + // +optional + // +kubebuilder:validation:Minimum=0 + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // The cluster upgrade as represented by Bridge + // +optional + OngoingUpgrade []*UpgradeOperation `json:"ongoingUpgrade,omitempty"` + + // The ID of the cluster's plan. Determines instance, CPU, and memory. + // +optional + Plan string `json:"plan"` + + // Most recent, raw responses from Bridge API + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + // +kubebuilder:validation:Type=object + Responses APIResponses `json:"responses"` + + // State of cluster in Bridge. + // +optional + State string `json:"state,omitempty"` + + // The amount of storage available to the cluster. + // +optional + Storage *resource.Quantity `json:"storage"` +} + +type APIResponses struct { + Cluster SchemalessObject `json:"cluster,omitempty"` + Status SchemalessObject `json:"status,omitempty"` + Upgrade SchemalessObject `json:"upgrade,omitempty"` +} + +type ClusterUpgrade struct { + Operations []*UpgradeOperation `json:"operations,omitempty"` +} + +type UpgradeOperation struct { + Flavor string `json:"flavor"` + StartingFrom string `json:"starting_from"` + State string `json:"state"` +} + +// TODO(crunchybridgecluster) Think through conditions +// CrunchyBridgeClusterStatus condition types. +const ( + ConditionUnknown = "" + ConditionUpgrading = "Upgrading" + ConditionReady = "Ready" + ConditionDeleting = "Deleting" +) + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +operator-sdk:csv:customresourcedefinitions:resources={{ConfigMap,v1},{Secret,v1},{Service,v1},{CronJob,v1beta1},{Deployment,v1},{Job,v1},{StatefulSet,v1},{PersistentVolumeClaim,v1}} + +// CrunchyBridgeCluster is the Schema for the crunchybridgeclusters API +type CrunchyBridgeCluster struct { + // ObjectMeta.Name is a DNS subdomain. + // - https://docs.k8s.io/concepts/overview/working-with-objects/names/#dns-subdomain-names + // - https://releases.k8s.io/v1.21.0/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/validator.go#L60 + + // In Bridge json, meta.name is "name" + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // NOTE(cbandy): Every CrunchyBridgeCluster needs a Spec, but it is optional here + // so ObjectMeta can be managed independently. + + Spec CrunchyBridgeClusterSpec `json:"spec,omitempty"` + Status CrunchyBridgeClusterStatus `json:"status,omitempty"` +} + +// Default implements "sigs.k8s.io/controller-runtime/pkg/webhook.Defaulter" so +// a webhook can be registered for the type. 
+// - https://book.kubebuilder.io/reference/webhook-overview.html +func (c *CrunchyBridgeCluster) Default() { + if len(c.APIVersion) == 0 { + c.APIVersion = GroupVersion.String() + } + if len(c.Kind) == 0 { + c.Kind = "CrunchyBridgeCluster" + } +} + +// +kubebuilder:object:root=true + +// CrunchyBridgeClusterList contains a list of CrunchyBridgeCluster +type CrunchyBridgeClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CrunchyBridgeCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CrunchyBridgeCluster{}, &CrunchyBridgeClusterList{}) +} + +func NewCrunchyBridgeCluster() *CrunchyBridgeCluster { + cluster := &CrunchyBridgeCluster{} + cluster.SetGroupVersionKind(GroupVersion.WithKind("CrunchyBridgeCluster")) + return cluster +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go index 91aea1afeb..15773a1815 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package v1beta1 contains API Schema definitions for the postgres-operator v1beta1 API group // +kubebuilder:object:generate=true diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go index f1dd16de77..2f01399372 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go @@ -1,65 +1,82 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 -import ( - "k8s.io/apimachinery/pkg/runtime" -) - type PatroniSpec struct { - // TODO(cbandy): Find a better way to have a map[string]interface{} here. - // See: https://github.com/kubernetes-sigs/controller-tools/commit/557da250b8 - // TODO(cbandy): Describe this field. - + // Patroni dynamic configuration settings. Changes to this value will be + // automatically reloaded without validation. Changes to certain PostgreSQL + // parameters cause PostgreSQL to restart. 
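+	// A minimal sketch of how this field might appear in a PostgresCluster
+	// manifest (the parameter and value are examples only):
+	//
+	//	patroni:
+	//	  dynamicConfiguration:
+	//	    postgresql:
+	//	      parameters:
+	//	        max_connections: 1000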
+ // More info: https://patroni.readthedocs.io/en/latest/dynamic_configuration.html // +optional - // +kubebuilder:validation:XPreserveUnknownFields - DynamicConfiguration runtime.RawExtension `json:"dynamicConfiguration,omitempty"` - - // TODO(cbandy): Describe the downtime involved with changing. + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + // +kubebuilder:validation:Type=object + DynamicConfiguration SchemalessObject `json:"dynamicConfiguration,omitempty"` // TTL of the cluster leader lock. "Think of it as the // length of time before initiation of the automatic failover process." + // Changing this value causes PostgreSQL to restart. // +optional // +kubebuilder:default=30 // +kubebuilder:validation:Minimum=3 LeaderLeaseDurationSeconds *int32 `json:"leaderLeaseDurationSeconds,omitempty"` - // TODO(cbandy): Describe the downtime involved with changing. - // The port on which Patroni should listen. + // Changing this value causes PostgreSQL to restart. // +optional // +kubebuilder:default=8008 // +kubebuilder:validation:Minimum=1024 Port *int32 `json:"port,omitempty"` - // TODO(cbandy): Describe the downtime involved with changing. - // The interval for refreshing the leader lock and applying // dynamicConfiguration. Must be less than leaderLeaseDurationSeconds. + // Changing this value causes PostgreSQL to restart. // +optional // +kubebuilder:default=10 // +kubebuilder:validation:Minimum=1 SyncPeriodSeconds *int32 `json:"syncPeriodSeconds,omitempty"` + // Switchover gives options to perform ad hoc switchovers in a PostgresCluster. + // +optional + Switchover *PatroniSwitchover `json:"switchover,omitempty"` + // TODO(cbandy): Add UseConfigMaps bool, default false. // TODO(cbandy): Allow other DCS: etcd, raft, etc? // N.B. changing this will cause downtime. // - https://patroni.readthedocs.io/en/latest/kubernetes.html } +type PatroniSwitchover struct { + + // Whether or not the operator should allow switchovers in a PostgresCluster + // +required + Enabled bool `json:"enabled"` + + // The instance that should become primary during a switchover. This field is + // optional when Type is "Switchover" and required when Type is "Failover". + // When it is not specified, a healthy replica is automatically selected. + // +optional + TargetInstance *string `json:"targetInstance,omitempty"` + + // Type of switchover to perform. Valid options are Switchover and Failover. + // "Switchover" changes the primary instance of a healthy PostgresCluster. + // "Failover" forces a particular instance to be primary, regardless of other + // factors. A TargetInstance must be specified to failover. + // NOTE: The Failover type is reserved as the "last resort" case. + // +kubebuilder:validation:Enum={Switchover,Failover} + // +kubebuilder:default:=Switchover + // +optional + Type string `json:"type,omitempty"` +} + +// PatroniSwitchover types. +const ( + PatroniSwitchoverTypeFailover = "Failover" + PatroniSwitchoverTypeSwitchover = "Switchover" +) + // Default sets the default values for certain Patroni configuration attributes, // including: // - Lock Lease Duration @@ -89,4 +106,12 @@ type PatroniStatus struct { // The PostgreSQL system identifier reported by Patroni. // +optional SystemIdentifier string `json:"systemIdentifier,omitempty"` + + // Tracks the execution of the switchover requests. 
+ // +optional + Switchover *string `json:"switchover,omitempty"` + + // Tracks the current timeline during switchovers + // +optional + SwitchoverTimeline *int64 `json:"switchoverTimeline,omitempty"` } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go new file mode 100644 index 0000000000..06c7321bc4 --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go @@ -0,0 +1,109 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// PGAdminConfiguration represents pgAdmin configuration files. +type PGAdminConfiguration struct { + // Files allows the user to mount projected volumes into the pgAdmin + // container so that files can be referenced by pgAdmin as needed. + Files []corev1.VolumeProjection `json:"files,omitempty"` + + // A Secret containing the value for the LDAP_BIND_PASSWORD setting. + // More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html + // +optional + LDAPBindPassword *corev1.SecretKeySelector `json:"ldapBindPassword,omitempty"` + + // Settings for the pgAdmin server process. Keys should be uppercase and + // values must be constants. + // More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + // +kubebuilder:validation:Type=object + Settings SchemalessObject `json:"settings,omitempty"` +} + +// PGAdminPodSpec defines the desired state of a pgAdmin deployment. +type PGAdminPodSpec struct { + // +optional + Metadata *Metadata `json:"metadata,omitempty"` + + // Scheduling constraints of a pgAdmin pod. Changing this value causes + // pgAdmin to restart. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // Configuration settings for the pgAdmin process. Changes to any of these + // values will be loaded without validation. Be careful, as + // you may put pgAdmin into an unusable state. + // +optional + Config PGAdminConfiguration `json:"config,omitempty"` + + // Defines a PersistentVolumeClaim for pgAdmin data. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + // +kubebuilder:validation:Required + DataVolumeClaimSpec corev1.PersistentVolumeClaimSpec `json:"dataVolumeClaimSpec"` + + // Name of a container image that can run pgAdmin 4. Changing this value causes + // pgAdmin to restart. The image may also be set using the RELATED_IMAGE_PGADMIN + // environment variable. + // More info: https://kubernetes.io/docs/concepts/containers/images + // +optional + Image string `json:"image,omitempty"` + + // Priority class name for the pgAdmin pod. Changing this value causes pgAdmin + // to restart. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + // +optional + PriorityClassName *string `json:"priorityClassName,omitempty"` + + // Number of desired pgAdmin pods. + // +optional + // +kubebuilder:default=1 + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=1 + Replicas *int32 `json:"replicas,omitempty"` + + // Compute resources of a pgAdmin container. Changing this value causes + // pgAdmin to restart. 
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + + // Specification of the service that exposes pgAdmin. + // +optional + Service *ServiceSpec `json:"service,omitempty"` + + // Tolerations of a pgAdmin pod. Changing this value causes pgAdmin to restart. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // Topology spread constraints of a pgAdmin pod. Changing this value causes + // pgAdmin to restart. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + // +optional + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// Default sets the port and replica count for pgAdmin if not set +func (s *PGAdminPodSpec) Default() { + if s.Replicas == nil { + s.Replicas = new(int32) + *s.Replicas = 1 + } +} + +// PGAdminPodStatus represents the observed state of a pgAdmin deployment. +type PGAdminPodStatus struct { + + // Hash that indicates which users have been installed into pgAdmin. + UsersRevision string `json:"usersRevision,omitempty"` +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go index 300f66203b..3e3098a602 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -60,15 +49,15 @@ type PGBackRestJobStatus struct { type PGBackRestScheduledBackupStatus struct { // The name of the associated pgBackRest scheduled backup CronJob - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional CronJobName string `json:"cronJobName,omitempty"` // The name of the associated pgBackRest repository - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional RepoName string `json:"repo,omitempty"` // The pgBackRest backup type for this Job - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional Type string `json:"type,omitempty"` // Represents the time the manual backup Job was acknowledged by the Job controller. 
@@ -120,11 +109,15 @@ type PGBackRestArchive struct { // +optional Image string `json:"image,omitempty"` + // Jobs field allows configuration for all backup jobs + // +optional + Jobs *BackupJobs `json:"jobs,omitempty"` + // Defines a pgBackRest repository - // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 // +listType=map // +listMapKey=name - Repos []PGBackRestRepo `json:"repos,omitempty"` + Repos []PGBackRestRepo `json:"repos"` // Defines configuration for a pgBackRest dedicated repository host. This section is only // applicable if at least one "volume" (i.e. PVC-based) repository is defined in the "repos" @@ -139,6 +132,49 @@ type PGBackRestArchive struct { // Defines details for performing an in-place restore using pgBackRest // +optional Restore *PGBackRestRestore `json:"restore,omitempty"` + + // Configuration for pgBackRest sidecar containers + // +optional + Sidecars *PGBackRestSidecars `json:"sidecars,omitempty"` +} + +// PGBackRestSidecars defines the configuration for pgBackRest sidecar containers +type PGBackRestSidecars struct { + // Defines the configuration for the pgBackRest sidecar container + // +optional + PGBackRest *Sidecar `json:"pgbackrest,omitempty"` + + // Defines the configuration for the pgBackRest config sidecar container + // +optional + PGBackRestConfig *Sidecar `json:"pgbackrestConfig,omitempty"` +} + +type BackupJobs struct { + // Resource limits for backup jobs. Includes manual, scheduled and replica + // create backups + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + + // Priority class name for the pgBackRest backup Job pods. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + // +optional + PriorityClassName *string `json:"priorityClassName,omitempty"` + + // Scheduling constraints of pgBackRest backup Job pods. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // Tolerations of pgBackRest backup Job pods. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // Limit the lifetime of a Job that has finished. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/job + // +optional + // +kubebuilder:validation:Minimum=60 + TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty"` } // PGBackRestManualBackup contains information that is used for creating a @@ -164,6 +200,12 @@ type PGBackRestRepoHost struct { // +optional Affinity *corev1.Affinity `json:"affinity,omitempty"` + // Priority class name for the pgBackRest repo host pod. Changing this value + // causes PostgreSQL to restart. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + // +optional + PriorityClassName *string `json:"priorityClassName,omitempty"` + // Resource requirements for a pgBackRest repository host // +optional Resources corev1.ResourceRequirements `json:"resources,omitempty"` @@ -173,11 +215,19 @@ type PGBackRestRepoHost struct { // +optional Tolerations []corev1.Toleration `json:"tolerations,omitempty"` - // ConfigMap containing custom SSH configuration + // Topology spread constraints of a Dedicated repo host pod. Changing this + // value causes the repo host to restart. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + // +optional + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + + // ConfigMap containing custom SSH configuration. + // Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. // +optional SSHConfiguration *corev1.ConfigMapProjection `json:"sshConfigMap,omitempty"` - // Secret containing custom SSH keys + // Secret containing custom SSH keys. + // Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. // +optional SSHSecret *corev1.SecretProjection `json:"sshSecret,omitempty"` } @@ -250,7 +300,7 @@ type PGBackRestRepo struct { // will be applicable once implemented: // https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/1027-api-unions - // The name of the the repository + // The name of the repository // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=^repo[1-4] Name string `json:"name"` @@ -292,7 +342,20 @@ type RepoHostStatus struct { type RepoPVC struct { // Defines a PersistentVolumeClaim spec used to create and/or bind a volume + // --- // +kubebuilder:validation:Required + // + // NOTE(validation): Every PVC must have at least one accessMode. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.accessModes`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2098-L2100 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2292-L2294 + // +kubebuilder:validation:XValidation:rule=`has(self.accessModes) && size(self.accessModes) > 0`,message=`missing accessModes` + // + // NOTE(validation): Every PVC must have a positive storage request. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.resources.requests.storage`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2126-L2133 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2318-L2325 + // +kubebuilder:validation:XValidation:rule=`has(self.resources) && has(self.resources.requests) && has(self.resources.requests.storage)`,message=`missing storage request` VolumeClaimSpec corev1.PersistentVolumeClaimSpec `json:"volumeClaimSpec"` } @@ -353,8 +416,59 @@ type RepoStatus struct { ReplicaCreateBackupComplete bool `json:"replicaCreateBackupComplete,omitempty"` // A hash of the required fields in the spec for defining an Azure, GCS or S3 repository, - // Utilizd to detect changes to these fields and then execute pgBackRest stanza-create + // Utilized to detect changes to these fields and then execute pgBackRest stanza-create // commands accordingly. // +optional RepoOptionsHash string `json:"repoOptionsHash,omitempty"` } + +// PGBackRestDataSource defines a pgBackRest configuration specifically for restoring from cloud-based data source +type PGBackRestDataSource struct { + // Projected volumes containing custom pgBackRest configuration. These files are mounted + // under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the + // PostgreSQL Operator: + // https://pgbackrest.org/configuration.html + // +optional + Configuration []corev1.VolumeProjection `json:"configuration,omitempty"` + + // Global pgBackRest configuration settings. 
These settings are included in the "global" + // section of the pgBackRest configuration generated by the PostgreSQL Operator, and then + // mounted under "/etc/pgbackrest/conf.d": + // https://pgbackrest.org/configuration.html + // +optional + Global map[string]string `json:"global,omitempty"` + + // Defines a pgBackRest repository + // +kubebuilder:validation:Required + Repo PGBackRestRepo `json:"repo"` + + // The name of an existing pgBackRest stanza to use as the data source for the new PostgresCluster. + // Defaults to `db` if not provided. + // +kubebuilder:default="db" + Stanza string `json:"stanza"` + + // Command line options to include when running the pgBackRest restore command. + // https://pgbackrest.org/command.html#command-restore + // +optional + Options []string `json:"options,omitempty"` + + // Resource requirements for the pgBackRest restore Job. + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + + // Scheduling constraints of the pgBackRest restore Job. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // Priority class name for the pgBackRest restore Job pod. Changing this + // value causes PostgreSQL to restart. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + // +optional + PriorityClassName *string `json:"priorityClassName,omitempty"` + + // Tolerations of the pgBackRest restore Job. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go index 5105aeb87e..e940a9300d 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go @@ -1,22 +1,12 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 import ( corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) // PGBouncerConfiguration represents PgBouncer configuration files. @@ -72,6 +62,11 @@ type PGBouncerPodSpec struct { // +optional Config PGBouncerConfiguration `json:"config,omitempty"` + // Custom sidecars for a PgBouncer pod. Changing this value causes + // PgBouncer to restart. + // +optional + Containers []corev1.Container `json:"containers,omitempty"` + // A secret projection containing a certificate and key with which to encrypt // connections to PgBouncer. The "tls.crt", "tls.key", and "ca.crt" paths must // be PEM-encoded certificates and keys. 
Changing this value causes PgBouncer @@ -94,12 +89,23 @@ type PGBouncerPodSpec struct { // +kubebuilder:validation:Minimum=1024 Port *int32 `json:"port,omitempty"` + // Priority class name for the pgBouncer pod. Changing this value causes + // PostgreSQL to restart. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + // +optional + PriorityClassName *string `json:"priorityClassName,omitempty"` + // Number of desired PgBouncer pods. // +optional // +kubebuilder:default=1 // +kubebuilder:validation:Minimum=0 Replicas *int32 `json:"replicas,omitempty"` + // Minimum number of pods that should be available at a time. + // Defaults to one when the replicas field is greater than one. + // +optional + MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` + // Compute resources of a PgBouncer container. Changing this value causes // PgBouncer to restart. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers @@ -110,11 +116,28 @@ type PGBouncerPodSpec struct { // +optional Service *ServiceSpec `json:"service,omitempty"` + // Configuration for pgBouncer sidecar containers + // +optional + Sidecars *PGBouncerSidecars `json:"sidecars,omitempty"` + // Tolerations of a PgBouncer pod. Changing this value causes PgBouncer to // restart. // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration // +optional Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // Topology spread constraints of a PgBouncer pod. Changing this value causes + // PgBouncer to restart. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + // +optional + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// PGBouncerSidecars defines the configuration for pgBouncer sidecar containers +type PGBouncerSidecars struct { + // Defines the configuration for the pgBouncer config sidecar container + // +optional + PGBouncerConfig *Sidecar `json:"pgbouncerConfig,omitempty"` } // Default returns the default port for PgBouncer (5432) if a port is not diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go new file mode 100644 index 0000000000..f2cd78335a --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go @@ -0,0 +1,39 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package v1beta1 + +import corev1 "k8s.io/api/core/v1" + +// PGMonitorSpec defines the desired state of the pgMonitor tool suite +type PGMonitorSpec struct { + // +optional + Exporter *ExporterSpec `json:"exporter,omitempty"` +} + +type ExporterSpec struct { + + // Projected volumes containing custom PostgreSQL Exporter configuration. Currently supports + // the customization of PostgreSQL Exporter queries. If a "queries.yml" file is detected in + // any volume projected using this field, it will be loaded using the "extend.query-path" flag: + // https://github.com/prometheus-community/postgres_exporter#flags + // Changing the values of field causes PostgreSQL and the exporter to restart. 
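A hedged sketch of the new PGBouncerPodSpec.MinAvailable field above: because it is an intstr.IntOrString, it accepts either an absolute pod count or a percentage, mirroring the PodDisruptionBudget convention. The snippet assumes the v1beta1 import from the first sketch and the intstr alias this file already imports; the values are illustrative.

replicas := int32(3)
byPercent := intstr.FromString("50%") // or intstr.FromInt(1) for an absolute count

bouncer := v1beta1.PGBouncerPodSpec{
    Replicas:     &replicas,
    MinAvailable: &byPercent,
}
_ = bouncer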
+ // +optional + Configuration []corev1.VolumeProjection `json:"configuration,omitempty"` + + // Projected secret containing custom TLS certificates to encrypt output from the exporter + // web server + // +optional + CustomTLSSecret *corev1.SecretProjection `json:"customTLSSecret,omitempty"` + + // The image name to use for crunchy-postgres-exporter containers. The image may + // also be set using the RELATED_IMAGE_PGEXPORTER environment variable. + // +optional + Image string `json:"image,omitempty"` + + // Changing this value causes PostgreSQL and the exporter to restart. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty"` +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go new file mode 100644 index 0000000000..8e99f8239f --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go @@ -0,0 +1,132 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PGUpgradeSpec defines the desired state of PGUpgrade +type PGUpgradeSpec struct { + + // +optional + Metadata *Metadata `json:"metadata,omitempty"` + + // The name of the cluster to be updated + // +required + // +kubebuilder:validation:MinLength=1 + PostgresClusterName string `json:"postgresClusterName"` + + // The image name to use for major PostgreSQL upgrades. + // +optional + Image *string `json:"image,omitempty"` + + // ImagePullPolicy is used to determine when Kubernetes will attempt to + // pull (download) container images. + // More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + // +kubebuilder:validation:Enum={Always,Never,IfNotPresent} + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + + // TODO(benjaminjb) Check the behavior: does updating ImagePullSecrets cause + // all running PGUpgrade pods to restart? + + // The image pull secrets used to pull from a private registry. + // Changing this value causes all running PGUpgrade pods to restart. + // https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + // +optional + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + + // TODO(benjaminjb): define webhook validation to make sure + // `fromPostgresVersion` is below `toPostgresVersion` + // or leverage other validation rules, such as the Common Expression Language + // rules currently in alpha as of Kubernetes 1.23 + // - https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules + + // The major version of PostgreSQL before the upgrade. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=11 + // +kubebuilder:validation:Maximum=17 + FromPostgresVersion int `json:"fromPostgresVersion"` + + // TODO(benjaminjb): define webhook validation to make sure + // `fromPostgresVersion` is below `toPostgresVersion` + // or leverage other validation rules, such as the Common Expression Language + // rules currently in alpha as of Kubernetes 1.23 + + // The major version of PostgreSQL to be upgraded to. 
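To make the PGUpgrade spec concrete, here is a sketch (not part of the patch) using the fields above and the toPostgresVersion field that follows just below. Per the TODOs in the spec, nothing in the schema at this point enforces fromPostgresVersion < toPostgresVersion; each value is only bounded to the 11 through 17 range. The cluster and object names are illustrative.

upgrade := v1beta1.PGUpgrade{}
upgrade.Name = "hippo-upgrade"
upgrade.Namespace = "postgres-operator"
upgrade.Spec = v1beta1.PGUpgradeSpec{
    PostgresClusterName: "hippo",
    FromPostgresVersion: 15,
    ToPostgresVersion:   16,
}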
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=11 + // +kubebuilder:validation:Maximum=17 + ToPostgresVersion int `json:"toPostgresVersion"` + + // The image name to use for PostgreSQL containers after upgrade. + // When omitted, the value comes from an operator environment variable. + // +optional + ToPostgresImage string `json:"toPostgresImage,omitempty"` + + // Resource requirements for the PGUpgrade container. + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + + // Scheduling constraints of the PGUpgrade pod. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // TODO(benjaminjb) Check the behavior: does updating PriorityClassName cause + // PGUpgrade to restart? + + // Priority class name for the PGUpgrade pod. Changing this + // value causes PGUpgrade pod to restart. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + // +optional + PriorityClassName *string `json:"priorityClassName,omitempty"` + + // Tolerations of the PGUpgrade pod. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +// PGUpgradeStatus defines the observed state of PGUpgrade +type PGUpgradeStatus struct { + // conditions represent the observations of PGUpgrade's current state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // observedGeneration represents the .metadata.generation on which the status was based. + // +optional + // +kubebuilder:validation:Minimum=0 + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// PGUpgrade is the Schema for the pgupgrades API +type PGUpgrade struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PGUpgradeSpec `json:"spec,omitempty"` + Status PGUpgradeStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// PGUpgradeList contains a list of PGUpgrade +type PGUpgradeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PGUpgrade `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PGUpgrade{}, &PGUpgradeList{}) +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go index 6d9696c782..b7baa72942 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -22,6 +11,22 @@ package v1beta1 // +kubebuilder:validation:MaxLength=63 type PostgresIdentifier string +type PostgresPasswordSpec struct { + // Type of password to generate. Defaults to ASCII. Valid options are ASCII + // and AlphaNumeric. + // "ASCII" passwords contain letters, numbers, and symbols from the US-ASCII character set. + // "AlphaNumeric" passwords contain letters and numbers from the US-ASCII character set. + // +kubebuilder:default=ASCII + // +kubebuilder:validation:Enum={ASCII,AlphaNumeric} + Type string `json:"type"` +} + +// PostgresPasswordSpec types. +const ( + PostgresPasswordTypeAlphaNumeric = "AlphaNumeric" + PostgresPasswordTypeASCII = "ASCII" +) + type PostgresUserSpec struct { // This value goes into the name of a corev1.Secret and a label value, so @@ -44,7 +49,14 @@ type PostgresUserSpec struct { // ALTER ROLE options except for PASSWORD. This field is ignored for the // "postgres" user. // More info: https://www.postgresql.org/docs/current/role-attributes.html + // +kubebuilder:validation:MaxLength=200 // +kubebuilder:validation:Pattern=`^[^;]*$` + // +kubebuilder:validation:XValidation:rule=`!self.matches("(?i:PASSWORD)")`,message="cannot assign password" + // +kubebuilder:validation:XValidation:rule=`!self.matches("(?:--|/[*]|[*]/)")`,message="cannot contain comments" // +optional Options string `json:"options,omitempty"` + + // Properties of the password generated for this user. + // +optional + Password *PostgresPasswordSpec `json:"password,omitempty"` } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go index 3cfb01c19e..83396902d0 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -51,10 +40,11 @@ metadata: creationTimestamp: null spec: backups: - pgbackrest: {} + pgbackrest: + repos: null + config: {} instances: null patroni: - dynamicConfiguration: null leaderLeaseDurationSeconds: 30 port: 8008 syncPeriodSeconds: 10 @@ -62,9 +52,11 @@ spec: postgresVersion: 0 status: monitoring: {} + patroni: {} + postgresVersion: 0 proxy: pgBouncer: {} - `)+"\n") + `)+"\n") }) t.Run("one instance set", func(t *testing.T) { @@ -81,7 +73,9 @@ metadata: creationTimestamp: null spec: backups: - pgbackrest: {} + pgbackrest: + repos: null + config: {} instances: - dataVolumeClaimSpec: resources: {} @@ -89,7 +83,6 @@ spec: replicas: 1 resources: {} patroni: - dynamicConfiguration: null leaderLeaseDurationSeconds: 30 port: 8008 syncPeriodSeconds: 10 @@ -97,9 +90,11 @@ spec: postgresVersion: 0 status: monitoring: {} + patroni: {} + postgresVersion: 0 proxy: pgBouncer: {} - `)+"\n") + `)+"\n") }) t.Run("empty proxy", func(t *testing.T) { diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index e86861621f..54e42baa3b 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -20,6 +9,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) // PostgresClusterSpec defines the desired state of PostgresCluster @@ -32,8 +22,8 @@ type PostgresClusterSpec struct { DataSource *DataSource `json:"dataSource,omitempty"` // PostgreSQL backup configuration - // +kubebuilder:validation:Required - Backups Backups `json:"backups"` + // +optional + Backups Backups `json:"backups,omitempty"` // The secret containing the Certificates and Keys to encrypt PostgreSQL // traffic will need to contain the server TLS certificate, TLS key and the @@ -56,6 +46,18 @@ type PostgresClusterSpec struct { // +optional CustomReplicationClientTLSSecret *corev1.SecretProjection `json:"customReplicationTLSSecret,omitempty"` + // DatabaseInitSQL defines a ConfigMap containing custom SQL that will + // be run after the cluster is initialized. This ConfigMap must be in the same + // namespace as the cluster. + // +optional + DatabaseInitSQL *DatabaseInitSQL `json:"databaseInitSQL,omitempty"` + // Whether or not the PostgreSQL cluster should use the defined default + // scheduling constraints. If the field is unset or false, the default + // scheduling constraints will be used in addition to any custom constraints + // provided. 
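Looking back at the PostgresUserSpec changes a little earlier in this patch (the new password type plus the CEL rules on options), a conforming user entry might look like the sketch below. It assumes the pre-existing Name and Databases fields of PostgresUserSpec, which are outside the hunk shown here.

user := v1beta1.PostgresUserSpec{
    Name:      "rhino",
    Databases: []v1beta1.PostgresIdentifier{"zoo"},
    // No semicolons, no "PASSWORD", no SQL comments: the pattern and both
    // CEL rules on the options field accept this value.
    Options: "CREATEDB CREATEROLE",
    Password: &v1beta1.PostgresPasswordSpec{
        Type: v1beta1.PostgresPasswordTypeAlphaNumeric,
    },
}
_ = user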
+ // +optional + DisableDefaultPodScheduling *bool `json:"disableDefaultPodScheduling,omitempty"` + // The image name to use for PostgreSQL containers. When omitted, the value // comes from an operator environment variable. For standard PostgreSQL images, // the format is RELATED_IMAGE_POSTGRES_{postgresVersion}, @@ -66,12 +68,21 @@ type PostgresClusterSpec struct { // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 Image string `json:"image,omitempty"` + // ImagePullPolicy is used to determine when Kubernetes will attempt to + // pull (download) container images. + // More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + // +kubebuilder:validation:Enum={Always,Never,IfNotPresent} + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + // The image pull secrets used to pull from a private registry // Changing this value causes all running pods to restart. // https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ // +optional ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + // Specifies one or more sets of PostgreSQL pods that replicate data for + // this cluster. // +listType=map // +listMapKey=name // +kubebuilder:validation:MinItems=1 @@ -87,6 +98,11 @@ type PostgresClusterSpec struct { // +optional Patroni *PatroniSpec `json:"patroni,omitempty"` + // Suspends the rollout and reconciliation of changes made to the + // PostgresCluster spec. + // +optional + Paused *bool `json:"paused,omitempty"` + // The port on which PostgreSQL should listen. // +optional // +kubebuilder:default=5432 @@ -95,8 +111,8 @@ type PostgresClusterSpec struct { // The major version of PostgreSQL installed in the PostgreSQL image // +kubebuilder:validation:Required - // +kubebuilder:validation:Minimum=10 - // +kubebuilder:validation:Maximum=13 + // +kubebuilder:validation:Minimum=11 + // +kubebuilder:validation:Maximum=17 // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 PostgresVersion int `json:"postgresVersion"` @@ -109,6 +125,10 @@ type PostgresClusterSpec struct { // +optional Proxy *PostgresProxySpec `json:"proxy,omitempty"` + // The specification of a user interface that connects to PostgreSQL. + // +optional + UserInterface *UserInterfaceSpec `json:"userInterface,omitempty"` + // The specification of monitoring tools that connect to PostgreSQL // +optional Monitoring *MonitoringSpec `json:"monitoring,omitempty"` @@ -117,6 +137,10 @@ type PostgresClusterSpec struct { // +optional Service *ServiceSpec `json:"service,omitempty"` + // Specification of the service that exposes PostgreSQL replica instances + // +optional + ReplicaService *ServiceSpec `json:"replicaService,omitempty"` + // Whether or not the PostgreSQL cluster should be stopped. // When this is true, workloads are scaled to zero and CronJobs // are suspended. @@ -131,7 +155,17 @@ type PostgresClusterSpec struct { // A list of group IDs applied to the process of a container. These can be // useful when accessing shared file systems with constrained permissions. // More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context - // +optional + // --- + // +kubebuilder:validation:Optional + // + // Containers should not run with a root GID. + // - https://kubernetes.io/docs/concepts/security/pod-security-standards/ + // +kubebuilder:validation:items:Minimum=1 + // + // Supplementary GIDs must fit within int32. 
+ // - https://releases.k8s.io/v1.18.0/pkg/apis/core/validation/validation.go#L3659-L3663 + // - https://releases.k8s.io/v1.22.0/pkg/apis/core/validation/validation.go#L3923-L3927 + // +kubebuilder:validation:items:Maximum=2147483647 SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` // Users to create inside PostgreSQL and the databases they should access. @@ -140,16 +174,75 @@ type PostgresClusterSpec struct { // from this list does NOT drop the user nor revoke their access. // +listType=map // +listMapKey=name + // +kubebuilder:validation:MaxItems=64 // +optional Users []PostgresUserSpec `json:"users,omitempty"` + + Config PostgresAdditionalConfig `json:"config,omitempty"` } -// DataSource defines the source of the PostgreSQL data directory for a new PostgresCluster. +// DataSource defines data sources for a new PostgresCluster. type DataSource struct { + // Defines a pgBackRest cloud-based data source that can be used to pre-populate the + // PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. + // The PGBackRest field is incompatible with the PostgresCluster field: only one + // data source can be used for pre-populating a new PostgreSQL cluster + // +optional + PGBackRest *PGBackRestDataSource `json:"pgbackrest,omitempty"` + // Defines a pgBackRest data source that can be used to pre-populate the PostgreSQL data // directory for a new PostgreSQL cluster using a pgBackRest restore. + // The PGBackRest field is incompatible with the PostgresCluster field: only one + // data source can be used for pre-populating a new PostgreSQL cluster // +optional PostgresCluster *PostgresClusterDataSource `json:"postgresCluster,omitempty"` + + // Defines any existing volumes to reuse for this PostgresCluster. + // +optional + Volumes *DataSourceVolumes `json:"volumes,omitempty"` +} + +// DataSourceVolumes defines any existing volumes to reuse for this PostgresCluster. +type DataSourceVolumes struct { + // Defines the existing pgData volume and directory to use in the current + // PostgresCluster. + // +optional + PGDataVolume *DataSourceVolume `json:"pgDataVolume,omitempty"` + + // Defines the existing pg_wal volume and directory to use in the current + // PostgresCluster. Note that a defined pg_wal volume MUST be accompanied by + // a pgData volume. + // +optional + PGWALVolume *DataSourceVolume `json:"pgWALVolume,omitempty"` + + // Defines the existing pgBackRest repo volume and directory to use in the + // current PostgresCluster. + // +optional + PGBackRestVolume *DataSourceVolume `json:"pgBackRestVolume,omitempty"` +} + +// DataSourceVolume defines the PVC name and data directory path for an existing cluster volume. +type DataSourceVolume struct { + // The existing PVC name. + PVCName string `json:"pvcName"` + + // The existing directory. When not set, a move Job is not created for the + // associated volume. + // +optional + Directory string `json:"directory,omitempty"` +} + +// DatabaseInitSQL defines a ConfigMap containing custom SQL that will +// be run after the cluster is initialized. This ConfigMap must be in the same +// namespace as the cluster. 
+type DatabaseInitSQL struct { + // Name is the name of a ConfigMap + // +required + Name string `json:"name"` + + // Key is the ConfigMap data key that points to a SQL string + // +required + Key string `json:"key"` } // PostgresClusterDataSource defines a data source for bootstrapping PostgreSQL clusters using a @@ -187,6 +280,12 @@ type PostgresClusterDataSource struct { // +optional Affinity *corev1.Affinity `json:"affinity,omitempty"` + // Priority class name for the pgBackRest restore Job pod. Changing this + // value causes PostgreSQL to restart. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + // +optional + PriorityClassName *string `json:"priorityClassName,omitempty"` + // Tolerations of the pgBackRest restore Job. // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration // +optional @@ -212,14 +311,22 @@ func (s *PostgresClusterSpec) Default() { if s.Proxy != nil { s.Proxy.Default() } + + if s.UserInterface != nil { + s.UserInterface.Default() + } } // Backups defines a PostgreSQL archive configuration type Backups struct { // pgBackRest archive configuration - // +kubebuilder:validation:Required + // +optional PGBackRest PGBackRestArchive `json:"pgbackrest"` + + // VolumeSnapshot configuration + // +optional + Snapshots *VolumeSnapshots `json:"snapshots,omitempty"` } // PostgresClusterStatus defines the observed state of PostgresCluster @@ -235,12 +342,23 @@ type PostgresClusterStatus struct { InstanceSets []PostgresInstanceSetStatus `json:"instances,omitempty"` // +optional - Patroni *PatroniStatus `json:"patroni,omitempty"` + Patroni PatroniStatus `json:"patroni,omitempty"` // Status information for pgBackRest // +optional PGBackRest *PGBackRestStatus `json:"pgbackrest,omitempty"` + // +optional + RegistrationRequired *RegistrationRequirementStatus `json:"registrationRequired,omitempty"` + + // +optional + TokenRequired string `json:"tokenRequired,omitempty"` + + // Stores the current PostgreSQL major version following a successful + // major PostgreSQL upgrade. + // +optional + PostgresVersion int `json:"postgresVersion"` + // Current state of the PostgreSQL proxy. // +optional Proxy PostgresProxyStatus `json:"proxy,omitempty"` @@ -254,6 +372,10 @@ type PostgresClusterStatus struct { // +optional StartupInstanceSet string `json:"startupInstanceSet,omitempty"` + // Current state of the PostgreSQL user interface. + // +optional + UserInterface *PostgresUserInterfaceStatus `json:"userInterface,omitempty"` + // Identifies the users that have been installed into PostgreSQL. UsersRevision string `json:"usersRevision,omitempty"` @@ -261,6 +383,10 @@ type PostgresClusterStatus struct { // +optional Monitoring MonitoringStatus `json:"monitoring,omitempty"` + // DatabaseInitSQL state of custom database initialization in the cluster + // +optional + DatabaseInitSQL *string `json:"databaseInitSQL,omitempty"` + // observedGeneration represents the .metadata.generation on which the status was based. // +optional // +kubebuilder:validation:Minimum=0 @@ -268,7 +394,7 @@ type PostgresClusterStatus struct { // conditions represent the observations of postgrescluster's current state. // Known .status.conditions.type are: "PersistentVolumeResizing", - // "ProxyAvailable" + // "Progressing", "ProxyAvailable" // +optional // +listType=map // +listMapKey=type @@ -278,16 +404,37 @@ type PostgresClusterStatus struct { // PostgresClusterStatus condition types. 
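A sketch combining two of the additions above: reusing existing volumes as a data source and pointing databaseInitSQL at a ConfigMap key. The PVC and ConfigMap names are illustrative, and NewPostgresCluster is the constructor added later in this patch.

cluster := v1beta1.NewPostgresCluster()
cluster.Spec.DataSource = &v1beta1.DataSource{
    Volumes: &v1beta1.DataSourceVolumes{
        PGDataVolume: &v1beta1.DataSourceVolume{
            PVCName:   "oldhippo-instance1-abcd-pgdata",
            Directory: "oldhippo",
        },
    },
}
cluster.Spec.DatabaseInitSQL = &v1beta1.DatabaseInitSQL{
    Name: "hippo-init-sql",
    Key:  "init.sql",
}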
const ( - PersistentVolumeResizing = "PersistentVolumeResizing" - ProxyAvailable = "ProxyAvailable" + PersistentVolumeResizing = "PersistentVolumeResizing" + PostgresClusterProgressing = "Progressing" + ProxyAvailable = "ProxyAvailable" + Registered = "Registered" ) type PostgresInstanceSetSpec struct { // +optional Metadata *Metadata `json:"metadata,omitempty"` + // This value goes into the name of an appsv1.StatefulSet, the hostname of + // a corev1.Pod, and label values. The pattern below is IsDNS1123Label + // wrapped in "()?" to accommodate the empty default. + // + // The Pods created by a StatefulSet have a "controller-revision-hash" label + // comprised of the StatefulSet name, a dash, and a 10-character hash. + // The length below is derived from limitations on label values: + // + // 63 (max) ≥ len(cluster) + 1 (dash) + // + len(set) + 1 (dash) + 4 (id) + // + 1 (dash) + 10 (hash) + // + // See: https://issue.k8s.io/64023 + + // Name that associates this set of PostgreSQL pods. This field is optional + // when only one instance set is defined. Each instance set in a cluster + // must have a unique name. The combined length of this and the cluster name + // must be 46 characters or less. // +optional // +kubebuilder:default="" + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$` Name string `json:"name"` // Scheduling constraints of a PostgreSQL pod. Changing this value causes @@ -296,29 +443,131 @@ type PostgresInstanceSetSpec struct { // +optional Affinity *corev1.Affinity `json:"affinity,omitempty"` + // Custom sidecars for PostgreSQL instance pods. Changing this value causes + // PostgreSQL to restart. + // +optional + Containers []corev1.Container `json:"containers,omitempty"` + // Defines a PersistentVolumeClaim for PostgreSQL data. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + // --- // +kubebuilder:validation:Required + // + // NOTE(validation): Every PVC must have at least one accessMode. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.accessModes`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2098-L2100 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2292-L2294 + // +kubebuilder:validation:XValidation:rule=`has(self.accessModes) && size(self.accessModes) > 0`,message=`missing accessModes` + // + // NOTE(validation): Every PVC must have a positive storage request. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.resources.requests.storage`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2126-L2133 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2318-L2325 + // +kubebuilder:validation:XValidation:rule=`has(self.resources) && has(self.resources.requests) && has(self.resources.requests.storage)`,message=`missing storage request` DataVolumeClaimSpec corev1.PersistentVolumeClaimSpec `json:"dataVolumeClaimSpec"` + // Priority class name for the PostgreSQL pod. Changing this value causes + // PostgreSQL to restart. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + // +optional + PriorityClassName *string `json:"priorityClassName,omitempty"` + + // Number of desired PostgreSQL pods. // +optional // +kubebuilder:default=1 // +kubebuilder:validation:Minimum=1 Replicas *int32 `json:"replicas,omitempty"` + // Minimum number of pods that should be available at a time. 
+ // Defaults to one when the replicas field is greater than one. + // +optional + MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` + // Compute resources of a PostgreSQL container. // +optional Resources corev1.ResourceRequirements `json:"resources,omitempty"` + // Configuration for instance sidecar containers + // +optional + Sidecars *InstanceSidecars `json:"sidecars,omitempty"` + // Tolerations of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration // +optional Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + // Topology spread constraints of a PostgreSQL pod. Changing this value causes + // PostgreSQL to restart. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + // +optional + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + // Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. // More info: https://www.postgresql.org/docs/current/wal.html - // +optional + // --- + // +kubebuilder:validation:Optional + // + // NOTE(validation): Every PVC must have at least one accessMode. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.accessModes`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2098-L2100 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2292-L2294 + // +kubebuilder:validation:XValidation:rule=`has(self.accessModes) && size(self.accessModes) > 0`,message=`missing accessModes` + // + // NOTE(validation): Every PVC must have a positive storage request. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.resources.requests.storage`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2126-L2133 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2318-L2325 + // +kubebuilder:validation:XValidation:rule=`has(self.resources) && has(self.resources.requests) && has(self.resources.requests.storage)`,message=`missing storage request` WALVolumeClaimSpec *corev1.PersistentVolumeClaimSpec `json:"walVolumeClaimSpec,omitempty"` + + // The list of tablespaces volumes to mount for this postgrescluster + // This field requires enabling TablespaceVolumes feature gate + // +listType=map + // +listMapKey=name + // +optional + TablespaceVolumes []TablespaceVolume `json:"tablespaceVolumes,omitempty"` +} + +type TablespaceVolume struct { + // This value goes into + // a. the name of a corev1.PersistentVolumeClaim, + // b. a label value, and + // c. a path name. + // So it must match both IsDNS1123Subdomain and IsValidLabelValue; + // and be valid as a file path. + + // The name for the tablespace, used as the path name for the volume. + // Must be unique in the instance set since they become the directory names. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern=`^[a-z][a-z0-9]*$` + // +kubebuilder:validation:Type=string + Name string `json:"name"` + + // Defines a PersistentVolumeClaim for a tablespace. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + // --- + // +kubebuilder:validation:Required + // + // NOTE(validation): Every PVC must have at least one accessMode. 
NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.accessModes`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2098-L2100 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2292-L2294 + // +kubebuilder:validation:XValidation:rule=`has(self.accessModes) && size(self.accessModes) > 0`,message=`missing accessModes` + // + // NOTE(validation): Every PVC must have a positive storage request. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.resources.requests.storage`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2126-L2133 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2318-L2325 + // +kubebuilder:validation:XValidation:rule=`has(self.resources) && has(self.resources.requests) && has(self.resources.requests.storage)`,message=`missing storage request` + DataVolumeClaimSpec corev1.PersistentVolumeClaimSpec `json:"dataVolumeClaimSpec"` +} + +// InstanceSidecars defines the configuration for instance sidecar containers +type InstanceSidecars struct { + // Defines the configuration for the replica cert copy sidecar container + // +optional + ReplicaCertCopy *Sidecar `json:"replicaCertCopy,omitempty"` } // Default sets the default values for an instance set spec, including the name @@ -340,13 +589,17 @@ type PostgresInstanceSetStatus struct { // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty"` - // Total number of non-terminated pods. + // Total number of pods. // +optional Replicas int32 `json:"replicas,omitempty"` - // Total number of non-terminated pods that have the desired specification. + // Total number of pods that have the desired specification. // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` + + // Desired Size of the pgData volume + // +optional + DesiredPGDataVolume map[string]string `json:"desiredPGDataVolume,omitempty"` } // PostgresProxySpec is a union of the supported PostgreSQL proxies. @@ -363,6 +616,10 @@ func (s *PostgresProxySpec) Default() { } } +type RegistrationRequirementStatus struct { + PGOVersion string `json:"pgoVersion,omitempty"` +} + type PostgresProxyStatus struct { PGBouncer PGBouncerPodStatus `json:"pgBouncer,omitempty"` } @@ -370,15 +627,51 @@ type PostgresProxyStatus struct { // PostgresStandbySpec defines if/how the cluster should be a hot standby. type PostgresStandbySpec struct { // Whether or not the PostgreSQL cluster should be read-only. When this is - // true, WAL files are applied from the pgBackRest repository. + // true, WAL files are applied from a pgBackRest repository or another + // PostgreSQL server. // +optional // +kubebuilder:default=true Enabled bool `json:"enabled"` // The name of the pgBackRest repository to follow for WAL files. - // +kubebuilder:validation:Required + // +optional // +kubebuilder:validation:Pattern=^repo[1-4] - RepoName string `json:"repoName"` + RepoName string `json:"repoName,omitempty"` + + // Network address of the PostgreSQL server to follow via streaming replication. + // +optional + Host string `json:"host,omitempty"` + + // Network port of the PostgreSQL server to follow via streaming replication. + // +optional + // +kubebuilder:validation:Minimum=1024 + Port *int32 `json:"port,omitempty"` +} + +// UserInterfaceSpec is a union of the supported PostgreSQL user interfaces. +type UserInterfaceSpec struct { + + // Defines a pgAdmin user interface. 
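The PVC validation rules repeated throughout this file boil down to two checks: at least one access mode and a storage request. A claim spec that satisfies both, and is therefore accepted for dataVolumeClaimSpec, walVolumeClaimSpec, and tablespace volumes alike, might look like this sketch; it assumes a k8s.io/api release new enough to use VolumeResourceRequirements, plus the apimachinery resource package.

claim := corev1.PersistentVolumeClaimSpec{
    AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
    Resources: corev1.VolumeResourceRequirements{ // corev1.ResourceRequirements on k8s.io/api before v0.29
        Requests: corev1.ResourceList{
            corev1.ResourceStorage: resource.MustParse("1Gi"),
        },
    },
}
_ = claim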
+ PGAdmin *PGAdminPodSpec `json:"pgAdmin"` +} + +// Default sets the defaults for any user interfaces that are set. +func (s *UserInterfaceSpec) Default() { + if s.PGAdmin != nil { + s.PGAdmin.Default() + } +} + +// PostgresUserInterfaceStatus is a union of the supported PostgreSQL user +// interface statuses. +type PostgresUserInterfaceStatus struct { + + // The state of the pgAdmin user interface. + PGAdmin PGAdminPodStatus `json:"pgAdmin,omitempty"` +} + +type PostgresAdditionalConfig struct { + Files []corev1.VolumeProjection `json:"files,omitempty"` } // +kubebuilder:object:root=true @@ -427,33 +720,6 @@ func init() { SchemeBuilder.Register(&PostgresCluster{}, &PostgresClusterList{}) } -// Metadata contains metadata for PostgresCluster resources -type Metadata struct { - // +optional - Labels map[string]string `json:"labels,omitempty"` - - // +optional - Annotations map[string]string `json:"annotations,omitempty"` -} - -// GetLabelsOrNil gets labels from a Metadata pointer, if Metadata -// hasn't been set return nil -func (meta *Metadata) GetLabelsOrNil() map[string]string { - if meta == nil { - return nil - } - return meta.Labels -} - -// GetAnnotationsOrNil gets annotations from a Metadata pointer, if Metadata -// hasn't been set return nil -func (meta *Metadata) GetAnnotationsOrNil() map[string]string { - if meta == nil { - return nil - } - return meta.Annotations -} - // MonitoringSpec is a union of the supported PostgreSQL Monitoring tools type MonitoringSpec struct { // +optional @@ -467,29 +733,16 @@ type MonitoringStatus struct { ExporterConfiguration string `json:"exporterConfiguration,omitempty"` } -// PGMonitorSpec defines the desired state of the pgMonitor tool suite -type PGMonitorSpec struct { - // +optional - Exporter *ExporterSpec `json:"exporter,omitempty"` +func NewPostgresCluster() *PostgresCluster { + cluster := &PostgresCluster{} + cluster.SetGroupVersionKind(GroupVersion.WithKind("PostgresCluster")) + return cluster } -type ExporterSpec struct { - - // Projected volumes containing custom PostgreSQL Exporter configuration. Currently supports - // the customization of PostgreSQL Exporter queries. If a "queries.yaml" file is detected in - // any volume projected using this field, it will be loaded using the "extend.query-path" flag: - // https://github.com/prometheus-community/postgres_exporter#flags - // Changing the values of field causes PostgreSQL and the exporter to restart. - // +optional - Configuration []corev1.VolumeProjection `json:"configuration,omitempty"` - - // The image name to use for crunchy-postgres-exporter containers. The image may - // also be set using the RELATED_IMAGE_PGEXPORTER environment variable. - // +optional - Image string `json:"image,omitempty"` - - // Changing this value causes PostgreSQL and the exporter to restart. 
- // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers - // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty"` +// VolumeSnapshots defines the configuration for VolumeSnapshots +type VolumeSnapshots struct { + // Name of the VolumeSnapshotClass that should be used by VolumeSnapshots + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + VolumeSnapshotClassName string `json:"volumeSnapshotClassName"` } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go index f5586dec5f..1dc4e3627e 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go @@ -1,24 +1,93 @@ -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 +package v1beta1 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) -package v1beta1 +// SchemalessObject is a map compatible with JSON object. +// +// Use with the following markers: +// - kubebuilder:pruning:PreserveUnknownFields +// - kubebuilder:validation:Schemaless +// - kubebuilder:validation:Type=object +type SchemalessObject map[string]any + +// DeepCopy creates a new SchemalessObject by copying the receiver. +func (in *SchemalessObject) DeepCopy() *SchemalessObject { + if in == nil { + return nil + } + out := new(SchemalessObject) + *out = runtime.DeepCopyJSON(*in) + return out +} type ServiceSpec struct { + // +optional + Metadata *Metadata `json:"metadata,omitempty"` + + // The port on which this service is exposed when type is NodePort or + // LoadBalancer. Value must be in-range and not in use or the operation will + // fail. If unspecified, a port will be allocated if this Service requires one. 
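Continuing the earlier cluster sketch: the new VolumeSnapshots type above only needs a VolumeSnapshotClass name, and the expanded ServiceSpec can pin a NodePort. The snapshot class and port values are assumptions about the target environment, not defaults from this patch.

cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{
    VolumeSnapshotClassName: "csi-hostpath-snapclass", // assumes a matching VolumeSnapshotClass exists
}

nodePort := int32(32007)
cluster.Spec.Service = &v1beta1.ServiceSpec{
    Type:     "NodePort",
    NodePort: &nodePort,
}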
+ // - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + // +optional + NodePort *int32 `json:"nodePort,omitempty"` + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // - // +kubebuilder:validation:Required + // +optional + // +kubebuilder:default=ClusterIP // +kubebuilder:validation:Enum={ClusterIP,NodePort,LoadBalancer} Type string `json:"type"` + + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies + // + // +optional + // +kubebuilder:validation:Enum={Cluster,Local} + InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty"` + + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies + // + // +optional + // +kubebuilder:validation:Enum={Cluster,Local} + ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty"` +} + +// Sidecar defines the configuration of a sidecar container +type Sidecar struct { + // Resource requirements for a sidecar container + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` +} + +// Metadata contains metadata for custom resources +type Metadata struct { + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // +optional + Annotations map[string]string `json:"annotations,omitempty"` +} + +// GetLabelsOrNil gets labels from a Metadata pointer, if Metadata +// hasn't been set return nil +func (meta *Metadata) GetLabelsOrNil() map[string]string { + if meta == nil { + return nil + } + return meta.Labels +} + +// GetAnnotationsOrNil gets annotations from a Metadata pointer, if Metadata +// hasn't been set return nil +func (meta *Metadata) GetAnnotationsOrNil() map[string]string { + if meta == nil { + return nil + } + return meta.Annotations } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go new file mode 100644 index 0000000000..96cd4da073 --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go @@ -0,0 +1,59 @@ +// Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package v1beta1 + +import ( + "reflect" + "testing" + + "gotest.tools/v3/assert" + "sigs.k8s.io/yaml" +) + +func TestSchemalessObjectDeepCopy(t *testing.T) { + t.Parallel() + + var n *SchemalessObject + assert.DeepEqual(t, n, n.DeepCopy()) + + var z SchemalessObject + assert.DeepEqual(t, z, *z.DeepCopy()) + + var one SchemalessObject + assert.NilError(t, yaml.Unmarshal( + []byte(`{ str: value, num: 1, arr: [a, 2, true] }`), &one, + )) + + // reflect and go-cmp agree the original and copy are equivalent. + same := *one.DeepCopy() + assert.DeepEqual(t, one, same) + assert.Assert(t, reflect.DeepEqual(one, same)) + + // Changes to the copy do not affect the original. 
+ { + change := *one.DeepCopy() + change["str"] = "banana" + assert.Assert(t, reflect.DeepEqual(one, same)) + assert.Assert(t, !reflect.DeepEqual(one, change)) + } + { + change := *one.DeepCopy() + change["num"] = 99 + assert.Assert(t, reflect.DeepEqual(one, same)) + assert.Assert(t, !reflect.DeepEqual(one, change)) + } + { + change := *one.DeepCopy() + change["arr"].([]any)[0] = "rock" + assert.Assert(t, reflect.DeepEqual(one, same)) + assert.Assert(t, !reflect.DeepEqual(one, change)) + } + { + change := *one.DeepCopy() + change["arr"] = append(change["arr"].([]any), "more") + assert.Assert(t, reflect.DeepEqual(one, same)) + assert.Assert(t, !reflect.DeepEqual(one, change)) + } +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go new file mode 100644 index 0000000000..4fbc90a3b9 --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go @@ -0,0 +1,219 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PGAdminConfiguration represents pgAdmin configuration files. +type StandalonePGAdminConfiguration struct { + // Files allows the user to mount projected volumes into the pgAdmin + // container so that files can be referenced by pgAdmin as needed. + // +optional + Files []corev1.VolumeProjection `json:"files,omitempty"` + + // A Secret containing the value for the CONFIG_DATABASE_URI setting. + // More info: https://www.pgadmin.org/docs/pgadmin4/latest/external_database.html + // +optional + ConfigDatabaseURI *corev1.SecretKeySelector `json:"configDatabaseURI,omitempty"` + + // Settings for the gunicorn server. + // More info: https://docs.gunicorn.org/en/latest/settings.html + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + // +kubebuilder:validation:Type=object + Gunicorn SchemalessObject `json:"gunicorn,omitempty"` + + // A Secret containing the value for the LDAP_BIND_PASSWORD setting. + // More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html + // +optional + LDAPBindPassword *corev1.SecretKeySelector `json:"ldapBindPassword,omitempty"` + + // Settings for the pgAdmin server process. Keys should be uppercase and + // values must be constants. + // More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + // +kubebuilder:validation:Type=object + Settings SchemalessObject `json:"settings,omitempty"` +} + +// PGAdminSpec defines the desired state of PGAdmin +type PGAdminSpec struct { + + // +optional + Metadata *Metadata `json:"metadata,omitempty"` + + // Configuration settings for the pgAdmin process. Changes to any of these + // values will be loaded without validation. Be careful, as + // you may put pgAdmin into an unusable state. + // +optional + Config StandalonePGAdminConfiguration `json:"config,omitempty"` + + // Defines a PersistentVolumeClaim for pgAdmin data. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + // +kubebuilder:validation:Required + DataVolumeClaimSpec corev1.PersistentVolumeClaimSpec `json:"dataVolumeClaimSpec"` + + // The image name to use for pgAdmin instance. 
+ // +optional + Image *string `json:"image,omitempty"` + + // ImagePullPolicy is used to determine when Kubernetes will attempt to + // pull (download) container images. + // More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + // +kubebuilder:validation:Enum={Always,Never,IfNotPresent} + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + + // The image pull secrets used to pull from a private registry. + // Changing this value causes all running PGAdmin pods to restart. + // https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + // +optional + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + + // Resource requirements for the PGAdmin container. + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + + // Scheduling constraints of the PGAdmin pod. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // Priority class name for the PGAdmin pod. Changing this + // value causes PGAdmin pod to restart. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ + // +optional + PriorityClassName *string `json:"priorityClassName,omitempty"` + + // Tolerations of the PGAdmin pod. + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // ServerGroups for importing PostgresClusters to pgAdmin. + // To create a pgAdmin with no selectors, leave this field empty. + // A pgAdmin created with no `ServerGroups` will not automatically + // add any servers through discovery. PostgresClusters can still be + // added manually. + // +optional + ServerGroups []ServerGroup `json:"serverGroups"` + + // pgAdmin users that are managed via the PGAdmin spec. Users can still + // be added via the pgAdmin GUI, but those users will not show up here. + // +listType=map + // +listMapKey=username + // +optional + Users []PGAdminUser `json:"users,omitempty"` + + // ServiceName will be used as the name of a ClusterIP service pointing + // to the pgAdmin pod and port. If the service already exists, PGO will + // update the service. For more information about services reference + // the Kubernetes and CrunchyData documentation. + // https://kubernetes.io/docs/concepts/services-networking/service/ + // +optional + ServiceName string `json:"serviceName,omitempty"` +} + +// +kubebuilder:validation:XValidation:rule=`[has(self.postgresClusterName),has(self.postgresClusterSelector)].exists_one(x,x)`,message=`exactly one of "postgresClusterName" or "postgresClusterSelector" is required` +type ServerGroup struct { + // The name for the ServerGroup in pgAdmin. + // Must be unique in the pgAdmin's ServerGroups since it becomes the ServerGroup name in pgAdmin. + // +kubebuilder:validation:Required + Name string `json:"name"` + + // PostgresClusterName selects one cluster to add to pgAdmin by name. + // +kubebuilder:validation:Optional + PostgresClusterName string `json:"postgresClusterName,omitempty"` + + // PostgresClusterSelector selects clusters to dynamically add to pgAdmin by matching labels. + // An empty selector like `{}` will select ALL clusters in the namespace. 
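A sketch of the standalone PGAdmin pieces above: Settings is a SchemalessObject, so arbitrary constant pgAdmin config keys pass CRD validation, and the ServerGroup CEL rule means each entry names exactly one of postgresClusterName or postgresClusterSelector. The setting key and names below are illustrative only.

admin := v1beta1.PGAdmin{}
admin.Name = "rhino-pgadmin"

admin.Spec.Config.Settings = v1beta1.SchemalessObject{
    "SHOW_GRAVATAR_IMAGE": false, // any constant config key passes schema validation
}
admin.Spec.ServerGroups = []v1beta1.ServerGroup{
    {Name: "by-name", PostgresClusterName: "hippo"},
    {Name: "everything", PostgresClusterSelector: metav1.LabelSelector{}}, // an empty selector matches all clusters in the namespace
}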
+ // +kubebuilder:validation:Optional + PostgresClusterSelector metav1.LabelSelector `json:"postgresClusterSelector,omitempty"` +} + +type PGAdminUser struct { + // A reference to the secret that holds the user's password. + // +kubebuilder:validation:Required + PasswordRef *corev1.SecretKeySelector `json:"passwordRef"` + + // Role determines whether the user has admin privileges or not. + // Defaults to User. Valid options are Administrator and User. + // +kubebuilder:validation:Enum={Administrator,User} + // +optional + Role string `json:"role,omitempty"` + + // The username for User in pgAdmin. + // Must be unique in the pgAdmin's users list. + // +kubebuilder:validation:Required + Username string `json:"username"` +} + +// PGAdminStatus defines the observed state of PGAdmin +type PGAdminStatus struct { + + // conditions represent the observations of pgAdmin's current state. + // Known .status.conditions.type is: "PersistentVolumeResizing" + // +optional + // +listType=map + // +listMapKey=type + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // ImageSHA represents the image SHA for the container running pgAdmin. + // +optional + ImageSHA string `json:"imageSHA,omitempty"` + + // MajorVersion represents the major version of the running pgAdmin. + // +optional + MajorVersion int `json:"majorVersion,omitempty"` + + // observedGeneration represents the .metadata.generation on which the status was based. + // +optional + // +kubebuilder:validation:Minimum=0 + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// PGAdmin is the Schema for the PGAdmin API +type PGAdmin struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PGAdminSpec `json:"spec,omitempty"` + Status PGAdminStatus `json:"status,omitempty"` +} + +// Default implements "sigs.k8s.io/controller-runtime/pkg/webhook.Defaulter" so +// a webhook can be registered for the type. +// - https://book.kubebuilder.io/reference/webhook-overview.html +func (p *PGAdmin) Default() { + if len(p.APIVersion) == 0 { + p.APIVersion = GroupVersion.String() + } + if len(p.Kind) == 0 { + p.Kind = "PGAdmin" + } +} + +//+kubebuilder:object:root=true + +// PGAdminList contains a list of PGAdmin +type PGAdminList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PGAdmin `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PGAdmin{}, &PGAdminList{}) +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index ab4a1069d0..fa32069d0f 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -1,34 +1,85 @@ -// +build !ignore_autogenerated +//go:build !ignore_autogenerated -/* - Copyright 2021 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Code generated by controller-gen. DO NOT EDIT. package v1beta1 import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResponses) DeepCopyInto(out *APIResponses) { + *out = *in + in.Cluster.DeepCopyInto(&out.Cluster) + in.Status.DeepCopyInto(&out.Status) + in.Upgrade.DeepCopyInto(&out.Upgrade) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResponses. +func (in *APIResponses) DeepCopy() *APIResponses { + if in == nil { + return nil + } + out := new(APIResponses) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupJobs) DeepCopyInto(out *BackupJobs) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TTLSecondsAfterFinished != nil { + in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupJobs. +func (in *BackupJobs) DeepCopy() *BackupJobs { + if in == nil { + return nil + } + out := new(BackupJobs) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Backups) DeepCopyInto(out *Backups) { *out = *in in.PGBackRest.DeepCopyInto(&out.PGBackRest) + if in.Snapshots != nil { + in, out := &in.Snapshots, &out.Snapshots + *out = new(VolumeSnapshots) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backups. @@ -41,14 +92,205 @@ func (in *Backups) DeepCopy() *Backups { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterUpgrade) DeepCopyInto(out *ClusterUpgrade) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]*UpgradeOperation, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(UpgradeOperation) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterUpgrade. 
+func (in *ClusterUpgrade) DeepCopy() *ClusterUpgrade { + if in == nil { + return nil + } + out := new(ClusterUpgrade) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrunchyBridgeCluster) DeepCopyInto(out *CrunchyBridgeCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeCluster. +func (in *CrunchyBridgeCluster) DeepCopy() *CrunchyBridgeCluster { + if in == nil { + return nil + } + out := new(CrunchyBridgeCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CrunchyBridgeCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrunchyBridgeClusterList) DeepCopyInto(out *CrunchyBridgeClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CrunchyBridgeCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeClusterList. +func (in *CrunchyBridgeClusterList) DeepCopy() *CrunchyBridgeClusterList { + if in == nil { + return nil + } + out := new(CrunchyBridgeClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CrunchyBridgeClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrunchyBridgeClusterRoleSpec) DeepCopyInto(out *CrunchyBridgeClusterRoleSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeClusterRoleSpec. +func (in *CrunchyBridgeClusterRoleSpec) DeepCopy() *CrunchyBridgeClusterRoleSpec { + if in == nil { + return nil + } + out := new(CrunchyBridgeClusterRoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrunchyBridgeClusterSpec) DeepCopyInto(out *CrunchyBridgeClusterSpec) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(Metadata) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*CrunchyBridgeClusterRoleSpec, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CrunchyBridgeClusterRoleSpec) + **out = **in + } + } + } + out.Storage = in.Storage.DeepCopy() +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeClusterSpec. 
+func (in *CrunchyBridgeClusterSpec) DeepCopy() *CrunchyBridgeClusterSpec { + if in == nil { + return nil + } + out := new(CrunchyBridgeClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrunchyBridgeClusterStatus) DeepCopyInto(out *CrunchyBridgeClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IsHA != nil { + in, out := &in.IsHA, &out.IsHA + *out = new(bool) + **out = **in + } + if in.IsProtected != nil { + in, out := &in.IsProtected, &out.IsProtected + *out = new(bool) + **out = **in + } + if in.OngoingUpgrade != nil { + in, out := &in.OngoingUpgrade, &out.OngoingUpgrade + *out = make([]*UpgradeOperation, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(UpgradeOperation) + **out = **in + } + } + } + in.Responses.DeepCopyInto(&out.Responses) + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + x := (*in).DeepCopy() + *out = &x + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeClusterStatus. +func (in *CrunchyBridgeClusterStatus) DeepCopy() *CrunchyBridgeClusterStatus { + if in == nil { + return nil + } + out := new(CrunchyBridgeClusterStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DataSource) DeepCopyInto(out *DataSource) { *out = *in + if in.PGBackRest != nil { + in, out := &in.PGBackRest, &out.PGBackRest + *out = new(PGBackRestDataSource) + (*in).DeepCopyInto(*out) + } if in.PostgresCluster != nil { in, out := &in.PostgresCluster, &out.PostgresCluster *out = new(PostgresClusterDataSource) (*in).DeepCopyInto(*out) } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = new(DataSourceVolumes) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource. @@ -61,89 +303,439 @@ func (in *DataSource) DeepCopy() *DataSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceVolume) DeepCopyInto(out *DataSourceVolume) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceVolume. +func (in *DataSourceVolume) DeepCopy() *DataSourceVolume { + if in == nil { + return nil + } + out := new(DataSourceVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceVolumes) DeepCopyInto(out *DataSourceVolumes) { + *out = *in + if in.PGDataVolume != nil { + in, out := &in.PGDataVolume, &out.PGDataVolume + *out = new(DataSourceVolume) + **out = **in + } + if in.PGWALVolume != nil { + in, out := &in.PGWALVolume, &out.PGWALVolume + *out = new(DataSourceVolume) + **out = **in + } + if in.PGBackRestVolume != nil { + in, out := &in.PGBackRestVolume, &out.PGBackRestVolume + *out = new(DataSourceVolume) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceVolumes. 
+func (in *DataSourceVolumes) DeepCopy() *DataSourceVolumes { + if in == nil { + return nil + } + out := new(DataSourceVolumes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseInitSQL) DeepCopyInto(out *DatabaseInitSQL) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseInitSQL. +func (in *DatabaseInitSQL) DeepCopy() *DatabaseInitSQL { + if in == nil { + return nil + } + out := new(DatabaseInitSQL) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExporterSpec) DeepCopyInto(out *ExporterSpec) { *out = *in if in.Configuration != nil { in, out := &in.Configuration, &out.Configuration - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomTLSSecret != nil { + in, out := &in.CustomTLSSecret, &out.CustomTLSSecret + *out = new(corev1.SecretProjection) + (*in).DeepCopyInto(*out) + } + in.Resources.DeepCopyInto(&out.Resources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExporterSpec. +func (in *ExporterSpec) DeepCopy() *ExporterSpec { + if in == nil { + return nil + } + out := new(ExporterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceSidecars) DeepCopyInto(out *InstanceSidecars) { + *out = *in + if in.ReplicaCertCopy != nil { + in, out := &in.ReplicaCertCopy, &out.ReplicaCertCopy + *out = new(Sidecar) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSidecars. +func (in *InstanceSidecars) DeepCopy() *InstanceSidecars { + if in == nil { + return nil + } + out := new(InstanceSidecars) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { + *out = *in + if in.PGMonitor != nil { + in, out := &in.PGMonitor, &out.PGMonitor + *out = new(PGMonitorSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec. 
+func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { + if in == nil { + return nil + } + out := new(MonitoringSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringStatus) DeepCopyInto(out *MonitoringStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringStatus. +func (in *MonitoringStatus) DeepCopy() *MonitoringStatus { + if in == nil { + return nil + } + out := new(MonitoringStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGAdmin) DeepCopyInto(out *PGAdmin) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGAdmin. +func (in *PGAdmin) DeepCopy() *PGAdmin { + if in == nil { + return nil + } + out := new(PGAdmin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PGAdmin) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGAdminConfiguration) DeepCopyInto(out *PGAdminConfiguration) { + *out = *in + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]corev1.VolumeProjection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LDAPBindPassword != nil { + in, out := &in.LDAPBindPassword, &out.LDAPBindPassword + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + in.Settings.DeepCopyInto(&out.Settings) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGAdminConfiguration. +func (in *PGAdminConfiguration) DeepCopy() *PGAdminConfiguration { + if in == nil { + return nil + } + out := new(PGAdminConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGAdminList) DeepCopyInto(out *PGAdminList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PGAdmin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGAdminList. +func (in *PGAdminList) DeepCopy() *PGAdminList { + if in == nil { + return nil + } + out := new(PGAdminList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PGAdminList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PGAdminPodSpec) DeepCopyInto(out *PGAdminPodSpec) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(Metadata) + (*in).DeepCopyInto(*out) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + in.Config.DeepCopyInto(&out.Config) + in.DataVolumeClaimSpec.DeepCopyInto(&out.DataVolumeClaimSpec) + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceSpec) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGAdminPodSpec. +func (in *PGAdminPodSpec) DeepCopy() *PGAdminPodSpec { + if in == nil { + return nil + } + out := new(PGAdminPodSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGAdminPodStatus) DeepCopyInto(out *PGAdminPodStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGAdminPodStatus. +func (in *PGAdminPodStatus) DeepCopy() *PGAdminPodStatus { + if in == nil { + return nil + } + out := new(PGAdminPodStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGAdminSpec) DeepCopyInto(out *PGAdminSpec) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(Metadata) + (*in).DeepCopyInto(*out) + } + in.Config.DeepCopyInto(&out.Config) + in.DataVolumeClaimSpec.DeepCopyInto(&out.DataVolumeClaimSpec) + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - in.Resources.DeepCopyInto(&out.Resources) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExporterSpec. 
-func (in *ExporterSpec) DeepCopy() *ExporterSpec { - if in == nil { - return nil - } - out := new(ExporterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Metadata) DeepCopyInto(out *Metadata) { - *out = *in - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val + if in.ServerGroups != nil { + in, out := &in.ServerGroups, &out.ServerGroups + *out = make([]ServerGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]PGAdminUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. -func (in *Metadata) DeepCopy() *Metadata { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGAdminSpec. +func (in *PGAdminSpec) DeepCopy() *PGAdminSpec { if in == nil { return nil } - out := new(Metadata) + out := new(PGAdminSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { +func (in *PGAdminStatus) DeepCopyInto(out *PGAdminStatus) { *out = *in - if in.PGMonitor != nil { - in, out := &in.PGMonitor, &out.PGMonitor - *out = new(PGMonitorSpec) - (*in).DeepCopyInto(*out) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec. -func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGAdminStatus. +func (in *PGAdminStatus) DeepCopy() *PGAdminStatus { if in == nil { return nil } - out := new(MonitoringSpec) + out := new(PGAdminStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoringStatus) DeepCopyInto(out *MonitoringStatus) { +func (in *PGAdminUser) DeepCopyInto(out *PGAdminUser) { *out = *in + if in.PasswordRef != nil { + in, out := &in.PasswordRef, &out.PasswordRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringStatus. -func (in *MonitoringStatus) DeepCopy() *MonitoringStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGAdminUser. 
+func (in *PGAdminUser) DeepCopy() *PGAdminUser { if in == nil { return nil } - out := new(MonitoringStatus) + out := new(PGAdminUser) in.DeepCopyInto(out) return out } @@ -158,7 +750,7 @@ func (in *PGBackRestArchive) DeepCopyInto(out *PGBackRestArchive) { } if in.Configuration != nil { in, out := &in.Configuration, &out.Configuration - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -170,6 +762,11 @@ func (in *PGBackRestArchive) DeepCopyInto(out *PGBackRestArchive) { (*out)[key] = val } } + if in.Jobs != nil { + in, out := &in.Jobs, &out.Jobs + *out = new(BackupJobs) + (*in).DeepCopyInto(*out) + } if in.Repos != nil { in, out := &in.Repos, &out.Repos *out = make([]PGBackRestRepo, len(*in)) @@ -192,6 +789,11 @@ func (in *PGBackRestArchive) DeepCopyInto(out *PGBackRestArchive) { *out = new(PGBackRestRestore) (*in).DeepCopyInto(*out) } + if in.Sidecars != nil { + in, out := &in.Sidecars, &out.Sidecars + *out = new(PGBackRestSidecars) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGBackRestArchive. @@ -234,6 +836,59 @@ func (in *PGBackRestBackupSchedules) DeepCopy() *PGBackRestBackupSchedules { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGBackRestDataSource) DeepCopyInto(out *PGBackRestDataSource) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]corev1.VolumeProjection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Global != nil { + in, out := &in.Global, &out.Global + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Repo.DeepCopyInto(&out.Repo) + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGBackRestDataSource. +func (in *PGBackRestDataSource) DeepCopy() *PGBackRestDataSource { + if in == nil { + return nil + } + out := new(PGBackRestDataSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PGBackRestJobStatus) DeepCopyInto(out *PGBackRestJobStatus) { *out = *in @@ -322,25 +977,37 @@ func (in *PGBackRestRepoHost) DeepCopyInto(out *PGBackRestRepoHost) { *out = *in if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } in.Resources.DeepCopyInto(&out.Resources) if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.SSHConfiguration != nil { in, out := &in.SSHConfiguration, &out.SSHConfiguration - *out = new(v1.ConfigMapProjection) + *out = new(corev1.ConfigMapProjection) (*in).DeepCopyInto(*out) } if in.SSHSecret != nil { in, out := &in.SSHSecret, &out.SSHSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } } @@ -403,6 +1070,31 @@ func (in *PGBackRestScheduledBackupStatus) DeepCopy() *PGBackRestScheduledBackup return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGBackRestSidecars) DeepCopyInto(out *PGBackRestSidecars) { + *out = *in + if in.PGBackRest != nil { + in, out := &in.PGBackRest, &out.PGBackRest + *out = new(Sidecar) + (*in).DeepCopyInto(*out) + } + if in.PGBackRestConfig != nil { + in, out := &in.PGBackRestConfig, &out.PGBackRestConfig + *out = new(Sidecar) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGBackRestSidecars. +func (in *PGBackRestSidecars) DeepCopy() *PGBackRestSidecars { + if in == nil { + return nil + } + out := new(PGBackRestSidecars) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PGBackRestStatus) DeepCopyInto(out *PGBackRestStatus) { *out = *in @@ -450,7 +1142,7 @@ func (in *PGBouncerConfiguration) DeepCopyInto(out *PGBouncerConfiguration) { *out = *in if in.Files != nil { in, out := &in.Files, &out.Files - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -498,13 +1190,20 @@ func (in *PGBouncerPodSpec) DeepCopyInto(out *PGBouncerPodSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } in.Config.DeepCopyInto(&out.Config) + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.CustomTLSSecret != nil { in, out := &in.CustomTLSSecret, &out.CustomTLSSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } if in.Port != nil { @@ -512,20 +1211,42 @@ func (in *PGBouncerPodSpec) DeepCopyInto(out *PGBouncerPodSpec) { *out = new(int32) **out = **in } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) **out = **in } + if in.MinAvailable != nil { + in, out := &in.MinAvailable, &out.MinAvailable + *out = new(intstr.IntOrString) + **out = **in + } in.Resources.DeepCopyInto(&out.Resources) if in.Service != nil { in, out := &in.Service, &out.Service *out = new(ServiceSpec) - **out = **in + (*in).DeepCopyInto(*out) + } + if in.Sidecars != nil { + in, out := &in.Sidecars, &out.Sidecars + *out = new(PGBouncerSidecars) + (*in).DeepCopyInto(*out) } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -557,6 +1278,26 @@ func (in *PGBouncerPodStatus) DeepCopy() *PGBouncerPodStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGBouncerSidecars) DeepCopyInto(out *PGBouncerSidecars) { + *out = *in + if in.PGBouncerConfig != nil { + in, out := &in.PGBouncerConfig, &out.PGBouncerConfig + *out = new(Sidecar) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGBouncerSidecars. +func (in *PGBouncerSidecars) DeepCopy() *PGBouncerSidecars { + if in == nil { + return nil + } + out := new(PGBouncerSidecars) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PGMonitorSpec) DeepCopyInto(out *PGMonitorSpec) { *out = *in @@ -567,12 +1308,141 @@ func (in *PGMonitorSpec) DeepCopyInto(out *PGMonitorSpec) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGMonitorSpec. -func (in *PGMonitorSpec) DeepCopy() *PGMonitorSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGMonitorSpec. 
+func (in *PGMonitorSpec) DeepCopy() *PGMonitorSpec { + if in == nil { + return nil + } + out := new(PGMonitorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGUpgrade) DeepCopyInto(out *PGUpgrade) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGUpgrade. +func (in *PGUpgrade) DeepCopy() *PGUpgrade { + if in == nil { + return nil + } + out := new(PGUpgrade) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PGUpgrade) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGUpgradeList) DeepCopyInto(out *PGUpgradeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PGUpgrade, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGUpgradeList. +func (in *PGUpgradeList) DeepCopy() *PGUpgradeList { + if in == nil { + return nil + } + out := new(PGUpgradeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PGUpgradeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGUpgradeSpec) DeepCopyInto(out *PGUpgradeSpec) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(Metadata) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGUpgradeSpec. +func (in *PGUpgradeSpec) DeepCopy() *PGUpgradeSpec { + if in == nil { + return nil + } + out := new(PGUpgradeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PGUpgradeStatus) DeepCopyInto(out *PGUpgradeStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGUpgradeStatus. +func (in *PGUpgradeStatus) DeepCopy() *PGUpgradeStatus { if in == nil { return nil } - out := new(PGMonitorSpec) + out := new(PGUpgradeStatus) in.DeepCopyInto(out) return out } @@ -596,6 +1466,11 @@ func (in *PatroniSpec) DeepCopyInto(out *PatroniSpec) { *out = new(int32) **out = **in } + if in.Switchover != nil { + in, out := &in.Switchover, &out.Switchover + *out = new(PatroniSwitchover) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatroniSpec. @@ -611,6 +1486,16 @@ func (in *PatroniSpec) DeepCopy() *PatroniSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PatroniStatus) DeepCopyInto(out *PatroniStatus) { *out = *in + if in.Switchover != nil { + in, out := &in.Switchover, &out.Switchover + *out = new(string) + **out = **in + } + if in.SwitchoverTimeline != nil { + in, out := &in.SwitchoverTimeline, &out.SwitchoverTimeline + *out = new(int64) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatroniStatus. @@ -623,6 +1508,48 @@ func (in *PatroniStatus) DeepCopy() *PatroniStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatroniSwitchover) DeepCopyInto(out *PatroniSwitchover) { + *out = *in + if in.TargetInstance != nil { + in, out := &in.TargetInstance, &out.TargetInstance + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatroniSwitchover. +func (in *PatroniSwitchover) DeepCopy() *PatroniSwitchover { + if in == nil { + return nil + } + out := new(PatroniSwitchover) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresAdditionalConfig) DeepCopyInto(out *PostgresAdditionalConfig) { + *out = *in + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]corev1.VolumeProjection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresAdditionalConfig. +func (in *PostgresAdditionalConfig) DeepCopy() *PostgresAdditionalConfig { + if in == nil { + return nil + } + out := new(PostgresAdditionalConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PostgresCluster) DeepCopyInto(out *PostgresCluster) { *out = *in @@ -661,12 +1588,17 @@ func (in *PostgresClusterDataSource) DeepCopyInto(out *PostgresClusterDataSource in.Resources.DeepCopyInto(&out.Resources) if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -731,17 +1663,27 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { in.Backups.DeepCopyInto(&out.Backups) if in.CustomTLSSecret != nil { in, out := &in.CustomTLSSecret, &out.CustomTLSSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } if in.CustomReplicationClientTLSSecret != nil { in, out := &in.CustomReplicationClientTLSSecret, &out.CustomReplicationClientTLSSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } + if in.DatabaseInitSQL != nil { + in, out := &in.DatabaseInitSQL, &out.DatabaseInitSQL + *out = new(DatabaseInitSQL) + **out = **in + } + if in.DisableDefaultPodScheduling != nil { + in, out := &in.DisableDefaultPodScheduling, &out.DisableDefaultPodScheduling + *out = new(bool) + **out = **in + } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]v1.LocalObjectReference, len(*in)) + *out = make([]corev1.LocalObjectReference, len(*in)) copy(*out, *in) } if in.InstanceSets != nil { @@ -761,6 +1703,11 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { *out = new(PatroniSpec) (*in).DeepCopyInto(*out) } + if in.Paused != nil { + in, out := &in.Paused, &out.Paused + *out = new(bool) + **out = **in + } if in.Port != nil { in, out := &in.Port, &out.Port *out = new(int32) @@ -771,6 +1718,11 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { *out = new(PostgresProxySpec) (*in).DeepCopyInto(*out) } + if in.UserInterface != nil { + in, out := &in.UserInterface, &out.UserInterface + *out = new(UserInterfaceSpec) + (*in).DeepCopyInto(*out) + } if in.Monitoring != nil { in, out := &in.Monitoring, &out.Monitoring *out = new(MonitoringSpec) @@ -779,7 +1731,12 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { if in.Service != nil { in, out := &in.Service, &out.Service *out = new(ServiceSpec) - **out = **in + (*in).DeepCopyInto(*out) + } + if in.ReplicaService != nil { + in, out := &in.ReplicaService, &out.ReplicaService + *out = new(ServiceSpec) + (*in).DeepCopyInto(*out) } if in.Shutdown != nil { in, out := &in.Shutdown, &out.Shutdown @@ -789,7 +1746,7 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { if in.Standby != nil { in, out := &in.Standby, &out.Standby *out = new(PostgresStandbySpec) - **out = **in + (*in).DeepCopyInto(*out) } if in.SupplementalGroups != nil { in, out := &in.SupplementalGroups, &out.SupplementalGroups @@ -803,6 +1760,7 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + in.Config.DeepCopyInto(&out.Config) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterSpec. 
@@ -821,23 +1779,36 @@ func (in *PostgresClusterStatus) DeepCopyInto(out *PostgresClusterStatus) { if in.InstanceSets != nil { in, out := &in.InstanceSets, &out.InstanceSets *out = make([]PostgresInstanceSetStatus, len(*in)) - copy(*out, *in) - } - if in.Patroni != nil { - in, out := &in.Patroni, &out.Patroni - *out = new(PatroniStatus) - **out = **in + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } + in.Patroni.DeepCopyInto(&out.Patroni) if in.PGBackRest != nil { in, out := &in.PGBackRest, &out.PGBackRest *out = new(PGBackRestStatus) (*in).DeepCopyInto(*out) } + if in.RegistrationRequired != nil { + in, out := &in.RegistrationRequired, &out.RegistrationRequired + *out = new(RegistrationRequirementStatus) + **out = **in + } out.Proxy = in.Proxy + if in.UserInterface != nil { + in, out := &in.UserInterface, &out.UserInterface + *out = new(PostgresUserInterfaceStatus) + **out = **in + } out.Monitoring = in.Monitoring + if in.DatabaseInitSQL != nil { + in, out := &in.DatabaseInitSQL, &out.DatabaseInitSQL + *out = new(string) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -864,28 +1835,64 @@ func (in *PostgresInstanceSetSpec) DeepCopyInto(out *PostgresInstanceSetSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } in.DataVolumeClaimSpec.DeepCopyInto(&out.DataVolumeClaimSpec) + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) **out = **in } + if in.MinAvailable != nil { + in, out := &in.MinAvailable, &out.MinAvailable + *out = new(intstr.IntOrString) + **out = **in + } in.Resources.DeepCopyInto(&out.Resources) + if in.Sidecars != nil { + in, out := &in.Sidecars, &out.Sidecars + *out = new(InstanceSidecars) + (*in).DeepCopyInto(*out) + } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.WALVolumeClaimSpec != nil { in, out := &in.WALVolumeClaimSpec, &out.WALVolumeClaimSpec - *out = new(v1.PersistentVolumeClaimSpec) + *out = new(corev1.PersistentVolumeClaimSpec) (*in).DeepCopyInto(*out) } + if in.TablespaceVolumes != nil { + in, out := &in.TablespaceVolumes, &out.TablespaceVolumes + *out = make([]TablespaceVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresInstanceSetSpec. @@ -901,6 +1908,13 @@ func (in *PostgresInstanceSetSpec) DeepCopy() *PostgresInstanceSetSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PostgresInstanceSetStatus) DeepCopyInto(out *PostgresInstanceSetStatus) { *out = *in + if in.DesiredPGDataVolume != nil { + in, out := &in.DesiredPGDataVolume, &out.DesiredPGDataVolume + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresInstanceSetStatus. @@ -913,6 +1927,21 @@ func (in *PostgresInstanceSetStatus) DeepCopy() *PostgresInstanceSetStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresPasswordSpec) DeepCopyInto(out *PostgresPasswordSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresPasswordSpec. +func (in *PostgresPasswordSpec) DeepCopy() *PostgresPasswordSpec { + if in == nil { + return nil + } + out := new(PostgresPasswordSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PostgresProxySpec) DeepCopyInto(out *PostgresProxySpec) { *out = *in @@ -952,6 +1981,11 @@ func (in *PostgresProxyStatus) DeepCopy() *PostgresProxyStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PostgresStandbySpec) DeepCopyInto(out *PostgresStandbySpec) { *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresStandbySpec. @@ -964,6 +1998,22 @@ func (in *PostgresStandbySpec) DeepCopy() *PostgresStandbySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresUserInterfaceStatus) DeepCopyInto(out *PostgresUserInterfaceStatus) { + *out = *in + out.PGAdmin = in.PGAdmin +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresUserInterfaceStatus. +func (in *PostgresUserInterfaceStatus) DeepCopy() *PostgresUserInterfaceStatus { + if in == nil { + return nil + } + out := new(PostgresUserInterfaceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PostgresUserSpec) DeepCopyInto(out *PostgresUserSpec) { *out = *in @@ -972,6 +2022,11 @@ func (in *PostgresUserSpec) DeepCopyInto(out *PostgresUserSpec) { *out = make([]PostgresIdentifier, len(*in)) copy(*out, *in) } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(PostgresPasswordSpec) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresUserSpec. @@ -984,6 +2039,21 @@ func (in *PostgresUserSpec) DeepCopy() *PostgresUserSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistrationRequirementStatus) DeepCopyInto(out *RegistrationRequirementStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrationRequirementStatus. 
+func (in *RegistrationRequirementStatus) DeepCopy() *RegistrationRequirementStatus { + if in == nil { + return nil + } + out := new(RegistrationRequirementStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RepoAzure) DeepCopyInto(out *RepoAzure) { *out = *in @@ -1076,9 +2146,54 @@ func (in *RepoStatus) DeepCopy() *RepoStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in SchemalessObject) DeepCopyInto(out *SchemalessObject) { + { + in := &in + clone := in.DeepCopy() + *out = *clone + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerGroup) DeepCopyInto(out *ServerGroup) { + *out = *in + in.PostgresClusterSelector.DeepCopyInto(&out.PostgresClusterSelector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerGroup. +func (in *ServerGroup) DeepCopy() *ServerGroup { + if in == nil { + return nil + } + out := new(ServerGroup) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(Metadata) + (*in).DeepCopyInto(*out) + } + if in.NodePort != nil { + in, out := &in.NodePort, &out.NodePort + *out = new(int32) + **out = **in + } + if in.InternalTrafficPolicy != nil { + in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy + *out = new(corev1.ServiceInternalTrafficPolicy) + **out = **in + } + if in.ExternalTrafficPolicy != nil { + in, out := &in.ExternalTrafficPolicy, &out.ExternalTrafficPolicy + *out = new(corev1.ServiceExternalTrafficPolicy) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. @@ -1090,3 +2205,123 @@ func (in *ServiceSpec) DeepCopy() *ServiceSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sidecar) DeepCopyInto(out *Sidecar) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sidecar. +func (in *Sidecar) DeepCopy() *Sidecar { + if in == nil { + return nil + } + out := new(Sidecar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StandalonePGAdminConfiguration) DeepCopyInto(out *StandalonePGAdminConfiguration) { + *out = *in + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]corev1.VolumeProjection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConfigDatabaseURI != nil { + in, out := &in.ConfigDatabaseURI, &out.ConfigDatabaseURI + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + in.Gunicorn.DeepCopyInto(&out.Gunicorn) + if in.LDAPBindPassword != nil { + in, out := &in.LDAPBindPassword, &out.LDAPBindPassword + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + in.Settings.DeepCopyInto(&out.Settings) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandalonePGAdminConfiguration. +func (in *StandalonePGAdminConfiguration) DeepCopy() *StandalonePGAdminConfiguration { + if in == nil { + return nil + } + out := new(StandalonePGAdminConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TablespaceVolume) DeepCopyInto(out *TablespaceVolume) { + *out = *in + in.DataVolumeClaimSpec.DeepCopyInto(&out.DataVolumeClaimSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TablespaceVolume. +func (in *TablespaceVolume) DeepCopy() *TablespaceVolume { + if in == nil { + return nil + } + out := new(TablespaceVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradeOperation) DeepCopyInto(out *UpgradeOperation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeOperation. +func (in *UpgradeOperation) DeepCopy() *UpgradeOperation { + if in == nil { + return nil + } + out := new(UpgradeOperation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserInterfaceSpec) DeepCopyInto(out *UserInterfaceSpec) { + *out = *in + if in.PGAdmin != nil { + in, out := &in.PGAdmin, &out.PGAdmin + *out = new(PGAdminPodSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInterfaceSpec. +func (in *UserInterfaceSpec) DeepCopy() *UserInterfaceSpec { + if in == nil { + return nil + } + out := new(UserInterfaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshots) DeepCopyInto(out *VolumeSnapshots) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshots. 
+func (in *VolumeSnapshots) DeepCopy() *VolumeSnapshots { + if in == nil { + return nil + } + out := new(VolumeSnapshots) + in.DeepCopyInto(out) + return out +} diff --git a/redhat/atomic/help.1 b/redhat/atomic/help.1 deleted file mode 100644 index bc21518dd8..0000000000 --- a/redhat/atomic/help.1 +++ /dev/null @@ -1,59 +0,0 @@ -.TH "postgres-operator " "1" " Container Image Pages" "Crunchy Data" "December 23, 2019" -.nh -.ad l - - -.SH NAME -.PP -postgres-operator \- Trusted open-source PostgreSQL-as-a-Service - - -.SH DESCRIPTION -.PP -The Crunchy PostgreSQL Operator automates and simplifies deploying and managing open source PostgreSQL clusters on Kubernetes and other Kubernetes-enabled platforms by providing the essential features you need to keep your PostgreSQL clusters up and running, including: - \- PostgreSQL Cluster Provisioning - \- High-Availability - \- Disaster Recovery - \- Monitoring - \- PostgreSQL User Management - \- Upgrade Management - \- Advanced Replication Support - \- Clone - \- Connection Pooling - \- Node Affinity - \- Scheduled Backups - \- Multi-Namespace Support - -.PP -and more. - - -.SH USAGE -.PP -For more information on the PostgreSQL Operator, see the official PostgreSQL Operator Documentation: https://access.crunchydata.com/documentation/postgres-operator/ - - -.SH LABELS -.PP -The starter container includes the following LABEL settings: - -.PP -That atomic command runs the Docker command set in this label: - -.PP -\fB\fCName=\fR - -.PP -The registry location and name of the image. For example, Name="registry.developers.crunchydata.com/crunchydata/postgres-operator". - -.PP -\fB\fCVersion=\fR - -.PP -The Red Hat Enterprise Linux version from which the container was built. For example, Version="7.7" - -.PP -\fB\fCRelease=\fR - -.PP -The specific release number of the container. For example, Release="4.5.0" diff --git a/redhat/atomic/help.md b/redhat/atomic/help.md deleted file mode 100644 index 8950e24d47..0000000000 --- a/redhat/atomic/help.md +++ /dev/null @@ -1,48 +0,0 @@ -= postgres-operator (1) -Crunchy Data -December 23, 2019 - -== NAME -postgres-operator - Trusted open-source PostgreSQL-as-a-Service - -== DESCRIPTION -The Crunchy PostgreSQL Operator automates and simplifies deploying and managing -open source PostgreSQL clusters on Kubernetes and other Kubernetes-enabled -platforms by providing the essential features you need to keep your PostgreSQL -clusters up and running, including: - -- PostgreSQL Cluster Provisioning -- High-Availability -- Disaster Recovery -- Monitoring -- PostgreSQL User Management -- Upgrade Management -- Advanced Replication Support -- Clone -- Connection Pooling -- Node Affinity -- Scheduled Backups -- Multi-Namespace Support - -and more. - -== USAGE -For more information on the PostgreSQL Operator, see the official -[PostgreSQL Operator Documentation](https://access.crunchydata.com/documentation/postgres-operator/) - -== LABELS -The starter container includes the following LABEL settings: - -That atomic command runs the Docker command set in this label: - -`Name=` - -The registry location and name of the image. For example, Name="registry.developers.crunchydata.com/crunchydata/postgres-operator". - -`Version=` - -The Red Hat Enterprise Linux version from which the container was built. For example, Version="7.7" - -`Release=` - -The specific release number of the container. 
For example, Release="4.5.0" diff --git a/testing/kuttl/README.md b/testing/kuttl/README.md new file mode 100644 index 0000000000..555ce9a26d --- /dev/null +++ b/testing/kuttl/README.md @@ -0,0 +1,92 @@ +# KUTTL + +## Installing + +Docs for install: https://kuttl.dev/docs/cli.html#setup-the-kuttl-kubectl-plugin + +Options: + - Download and install the binary + - Install the `kubectl krew` [plugin manager](https://github.com/kubernetes-sigs/krew) + and `kubectl krew install kuttl` + +## Cheat sheet + +### Suppressing Noisy Logs + +KUTTL gives you the option to suppress events from the test logging output. To enable this feature, +update the `kuttl` parameter when calling the `make` target: + +``` +KUTTL_TEST='kuttl test --suppress-log=events' make check-kuttl +``` + +To suppress the events permanently, you can add the following to the KUTTL config (kuttl-test.yaml): +``` +suppress: +- events +``` + +### Run test suite + +Make sure that the operator is running in your Kubernetes environment and that your `kubeconfig` is +set up. Then run the make targets: + +``` +make generate-kuttl check-kuttl +``` + +### Running a single test + +A single test is considered to be one directory under `kuttl/e2e-generated`, for example +`kuttl/e2e-generated/restore` is the `restore` test. + +There are two ways to run a single test in isolation: +- using an env var with the make target: `KUTTL_TEST='kuttl test --test <test-name>' make check-kuttl` +- using the `kubectl kuttl --test` flag: `kubectl kuttl test testing/kuttl/e2e-generated --test <test-name>` + +### Writing additional tests + +To make it easier to read tests, we want to put our `assert.yaml`/`errors.yaml` files after the +files that create/update the objects for a step. To achieve this, infix an extra `-` between the +step number and the object/step name. + +For example, if the `00` test step wants to create a cluster and then assert that the cluster is ready, +the files would be named: + +```yaml +00--cluster.yaml # note the extra `-` to ensure that it sorts above the following file +00-assert.yaml +``` + +### Generating tests + +KUTTL is good at setting up K8s objects for testing, but does not have a native way to dynamically +change those K8s objects before applying them. That means that, if we wanted to write a cluster +connection test for PG 13 and PG 14, we would end up writing two nearly identical tests. + +Rather than write those multiple tests, we are using `envsubst` to replace some common variables +and writing those files to the `testing/kuttl/e2e-generated*` directories. + +These templated test files can be generated by setting some variables in the command line and +calling the `make generate-kuttl` target: + +```console +KUTTL_PG_VERSION=13 KUTTL_POSTGIS_VERSION=3.0 make generate-kuttl +``` + +This will loop through the files under the `e2e` and `e2e-other` directories and create matching +files under the `e2e-generated` and `e2e-generated-other` directories that can be checked for +correctness before running the tests. + +Please note, `make check-kuttl` does not run the `e2e-other` tests. To run the `postgis-cluster` +test, you can use: + +``` +kubectl kuttl test testing/kuttl/e2e-generated-other/ --timeout=180 --test postgis-cluster +``` + +To run the `gssapi` test, please see testing/kuttl/e2e-other/gssapi/README.md. + +To prevent errors, we want to set defaults for all the environment variables used in the source +YAML files; so if you add a new test with a new variable, please update the Makefile with a +reasonable/preferred default.
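For context, the `generate-kuttl` target is essentially an `envsubst` loop over the source directories. The sketch below is illustrative only; the Makefile is the authoritative recipe, the default values shown are placeholders, and any additional `KUTTL_*` variables (for example `KUTTL_PSQL_IMAGE`) would be exported the same way.

```bash
# Rough sketch of what `make generate-kuttl` does; see the Makefile for the real target.
# Defaults below are illustrative placeholders, not the project's actual defaults.
export KUTTL_PG_VERSION="${KUTTL_PG_VERSION:-15}"
export KUTTL_POSTGIS_VERSION="${KUTTL_POSTGIS_VERSION:-3.0}"

find testing/kuttl/e2e testing/kuttl/e2e-other -name '*.yaml' | while read -r source; do
  # e2e -> e2e-generated and e2e-other -> e2e-generated-other
  target="${source/e2e/e2e-generated}"
  mkdir -p "$(dirname "${target}")"
  envsubst < "${source}" > "${target}"
done
```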
diff --git a/testing/kuttl/e2e-other/autogrow-volume/00-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/00-assert.yaml new file mode 100644 index 0000000000..b4372b75e7 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/00-assert.yaml @@ -0,0 +1,7 @@ +# Ensure that the default StorageClass supports VolumeExpansion +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" +allowVolumeExpansion: true diff --git a/testing/kuttl/e2e-other/autogrow-volume/01-create.yaml b/testing/kuttl/e2e-other/autogrow-volume/01-create.yaml new file mode 100644 index 0000000000..fc947a538f --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/01-create.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/01-create-cluster.yaml +assert: +- files/01-cluster-and-pvc-created.yaml diff --git a/testing/kuttl/e2e-other/autogrow-volume/02-add-data.yaml b/testing/kuttl/e2e-other/autogrow-volume/02-add-data.yaml new file mode 100644 index 0000000000..261c274a51 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/02-add-data.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/02-create-data.yaml +assert: +- files/02-create-data-completed.yaml diff --git a/testing/kuttl/e2e-other/autogrow-volume/03-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/03-assert.yaml new file mode 100644 index 0000000000..ad31b61401 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/03-assert.yaml @@ -0,0 +1,12 @@ +--- +# Check that annotation is set +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: auto-grow-volume + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/patroni: auto-grow-volume-ha + annotations: + suggested-pgdata-pvc-size: 1461Mi diff --git a/testing/kuttl/e2e-other/autogrow-volume/04-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/04-assert.yaml new file mode 100644 index 0000000000..d486f9de18 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/04-assert.yaml @@ -0,0 +1,19 @@ +# We know that the PVC sizes have changed so now we can check that they have been +# updated to have the expected size +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: auto-grow-volume + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 1461Mi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound diff --git a/testing/kuttl/e2e-other/autogrow-volume/05-check-event.yaml b/testing/kuttl/e2e-other/autogrow-volume/05-check-event.yaml new file mode 100644 index 0000000000..475177d242 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/05-check-event.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + # Verify expected event has occurred + - script: | + EVENT=$( + kubectl get events --namespace="${NAMESPACE}" \ + --field-selector reason="VolumeAutoGrow" --output=jsonpath={.items..message} + ) + + if [[ "${EVENT}" != "pgData volume expansion to 1461Mi requested for auto-grow-volume/instance1." 
]]; then exit 1; fi diff --git a/testing/kuttl/e2e-other/autogrow-volume/README.md b/testing/kuttl/e2e-other/autogrow-volume/README.md new file mode 100644 index 0000000000..674bc69b40 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/README.md @@ -0,0 +1,9 @@ +### AutoGrow Volume + +* 00: Assert the storage class allows volume expansion +* 01: Create and verify PostgresCluster and PVC +* 02: Add data to trigger growth and verify Job completes +* 03: Verify annotation on the instance Pod +* 04: Verify the PVC request has been set and the PVC has grown +* 05: Verify the expansion request Event has been created + Note: This Event should be created between steps 03 and 04 but is checked at the end for timing purposes. diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/01-cluster-and-pvc-created.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/01-cluster-and-pvc-created.yaml new file mode 100644 index 0000000000..17804b8205 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/files/01-cluster-and-pvc-created.yaml @@ -0,0 +1,27 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: auto-grow-volume +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: auto-grow-volume + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 1Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/01-create-cluster.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/01-create-cluster.yaml new file mode 100644 index 0000000000..01eaf7a684 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/files/01-create-cluster.yaml @@ -0,0 +1,27 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: auto-grow-volume +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + limits: + storage: 2Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data-completed.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data-completed.yaml new file mode 100644 index 0000000000..fdb42e68f5 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data-completed.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: create-data +status: + succeeded: 1 diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data.yaml new file mode 100644 index 0000000000..c42f0dec10 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data.yaml @@ -0,0 +1,32 @@ +--- +# Create some data that should be present after resizing. 
+apiVersion: batch/v1 +kind: Job +metadata: + name: create-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: auto-grow-volume-pguser-auto-grow-volume, key: uri } } + + # Do not wait indefinitely, but leave enough time to create the data. + - { name: PGCONNECT_TIMEOUT, value: '60' } + + command: + - psql + - $(PGURI) + - --set=ON_ERROR_STOP=1 + - --command + - | # create schema for user and add enough data to get over 75% usage + CREATE SCHEMA "auto-grow-volume" AUTHORIZATION "auto-grow-volume"; + CREATE TABLE big_table AS SELECT 'data' || s AS mydata FROM generate_series(1,6000000) AS s; diff --git a/testing/kuttl/e2e-other/cluster-migrate/01--non-crunchy-cluster.yaml b/testing/kuttl/e2e-other/cluster-migrate/01--non-crunchy-cluster.yaml new file mode 100644 index 0000000000..1ccceb7098 --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/01--non-crunchy-cluster.yaml @@ -0,0 +1,193 @@ +apiVersion: v1 +kind: Secret +metadata: + name: non-crunchy-cluster + labels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster +type: Opaque +stringData: + postgres-password: "SR6kNAFXvX" +--- +apiVersion: v1 +kind: Service +metadata: + name: non-crunchy-cluster-hl + labels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: tcp-postgresql + port: 5432 + targetPort: tcp-postgresql + selector: + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary +--- +apiVersion: v1 +kind: Service +metadata: + name: non-crunchy-cluster + labels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary +spec: + type: ClusterIP + sessionAffinity: None + ports: + - name: tcp-postgresql + port: 5432 + targetPort: tcp-postgresql + nodePort: null + selector: + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: non-crunchy-cluster + labels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary +spec: + replicas: 1 + serviceName: non-crunchy-cluster-hl + updateStrategy: + rollingUpdate: {} + type: RollingUpdate + selector: + matchLabels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary + template: + metadata: + name: non-crunchy-cluster + labels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary + spec: + serviceAccountName: default + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + postgres-operator-test: kuttl + app.kubernetes.io/name: postgresql + 
app.kubernetes.io/instance: non-crunchy-cluster + app.kubernetes.io/component: primary + namespaces: + - "default" + topologyKey: kubernetes.io/hostname + weight: 1 + securityContext: + fsGroup: 1001 + hostNetwork: false + hostIPC: false + containers: + - name: postgresql + image: docker.io/bitnami/postgresql:${KUTTL_BITNAMI_IMAGE_TAG} + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + env: + - name: BITNAMI_DEBUG + value: "false" + - name: POSTGRESQL_PORT_NUMBER + value: "5432" + - name: POSTGRESQL_VOLUME_DIR + value: "/bitnami/postgresql" + - name: PGDATA + value: "/bitnami/postgresql/data" + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: non-crunchy-cluster + key: postgres-password + - name: POSTGRESQL_ENABLE_LDAP + value: "no" + - name: POSTGRESQL_ENABLE_TLS + value: "no" + - name: POSTGRESQL_LOG_HOSTNAME + value: "false" + - name: POSTGRESQL_LOG_CONNECTIONS + value: "false" + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: "false" + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: "off" + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: "error" + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: "pgaudit" + ports: + - name: tcp-postgresql + containerPort: 5432 + livenessProbe: + failureThreshold: 6 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + exec: + command: + - /bin/sh + - -c + - exec pg_isready -U "postgres" -h localhost -p 5432 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + exec: + command: + - /bin/sh + - -c + - -e + - | + exec pg_isready -U "postgres" -h localhost -p 5432 + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] + resources: + limits: {} + requests: + cpu: 250m + memory: 256Mi + volumeMounts: + - name: dshm + mountPath: /dev/shm + - name: data + mountPath: /bitnami/postgresql + volumes: + - name: dshm + emptyDir: + medium: Memory + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "1Gi" diff --git a/testing/kuttl/e2e-other/cluster-migrate/01-assert.yaml b/testing/kuttl/e2e-other/cluster-migrate/01-assert.yaml new file mode 100644 index 0000000000..c45fe79261 --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/01-assert.yaml @@ -0,0 +1,8 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: non-crunchy-cluster +status: + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/kuttl/e2e-other/cluster-migrate/02--create-data.yaml b/testing/kuttl/e2e-other/cluster-migrate/02--create-data.yaml new file mode 100644 index 0000000000..a9b7ebf152 --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/02--create-data.yaml @@ -0,0 +1,30 @@ +--- +# Create some data that will be preserved after migration. +apiVersion: batch/v1 +kind: Job +metadata: + name: original-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - { name: PGHOST, value: "non-crunchy-cluster" } + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } + - { name: PGPASSWORD, valueFrom: { secretKeyRef: { name: non-crunchy-cluster, key: postgres-password } } } + command: + - psql + - --username=postgres + - --dbname=postgres + - --set=ON_ERROR_STOP=1 + - --command + - | + CREATE TABLE IF NOT EXISTS important (data) AS VALUES ('treasure'); diff --git a/testing/kuttl/e2e-other/cluster-migrate/02-assert.yaml b/testing/kuttl/e2e-other/cluster-migrate/02-assert.yaml new file mode 100644 index 0000000000..5115ba97c9 --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/02-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: original-data +status: + succeeded: 1 diff --git a/testing/kuttl/e2e-other/cluster-migrate/03--alter-pv.yaml b/testing/kuttl/e2e-other/cluster-migrate/03--alter-pv.yaml new file mode 100644 index 0000000000..64fa700297 --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/03--alter-pv.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + set -e + VOLUME_NAME=$( + kubectl get pvc --namespace "${NAMESPACE}" \ + --output=jsonpath={.items..spec.volumeName} + ) + + ORIGINAL_POLICY=$( + kubectl get pv "${VOLUME_NAME}" \ + --output=jsonpath={.spec.persistentVolumeReclaimPolicy} + ) + + kubectl create configmap persistent-volume-reclaim-policy --namespace "${NAMESPACE}" \ + --from-literal=ORIGINAL_POLICY="${ORIGINAL_POLICY}" \ + --from-literal=VOLUME_NAME="${VOLUME_NAME}" + + kubectl patch pv "${VOLUME_NAME}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' + + kubectl label pv "${VOLUME_NAME}" postgres-operator-test=kuttl app.kubernetes.io/name=postgresql app.kubernetes.io/instance=non-crunchy-cluster test-namespace="${NAMESPACE}" diff --git a/testing/kuttl/e2e-other/cluster-migrate/04--delete.yaml b/testing/kuttl/e2e-other/cluster-migrate/04--delete.yaml new file mode 100644 index 0000000000..ed38b23d9f --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/04--delete.yaml @@ -0,0 +1,15 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: +- apiVersion: apps/v1 + kind: StatefulSet + name: non-crunchy-cluster +- apiVersion: v1 + kind: Service + name: non-crunchy-cluster +- apiVersion: v1 + kind: Service + name: non-crunchy-cluster-hl +- apiVersion: v1 + kind: Secret + name: non-crunchy-cluster diff --git a/testing/kuttl/e2e-other/cluster-migrate/04-errors.yaml b/testing/kuttl/e2e-other/cluster-migrate/04-errors.yaml new file mode 100644 index 0000000000..1767e8040f --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/04-errors.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Pod +metadata: + name: non-crunchy-cluster-0 diff --git a/testing/kuttl/e2e-other/cluster-migrate/05--cluster.yaml b/testing/kuttl/e2e-other/cluster-migrate/05--cluster.yaml new file mode 100644 index 0000000000..a81666ed01 --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/05--cluster.yaml @@ -0,0 +1,30 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-migrate +spec: + dataSource: + volumes: + pgDataVolume: + pvcName: data-non-crunchy-cluster-0 + directory: data + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git 
a/testing/kuttl/e2e-other/cluster-migrate/06-assert.yaml b/testing/kuttl/e2e-other/cluster-migrate/06-assert.yaml new file mode 100644 index 0000000000..1a25966abb --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/06-assert.yaml @@ -0,0 +1,21 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-migrate +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: cluster-migrate + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: master +status: + phase: Running diff --git a/testing/kuttl/e2e-other/cluster-migrate/07--set-collation.yaml b/testing/kuttl/e2e-other/cluster-migrate/07--set-collation.yaml new file mode 100644 index 0000000000..00eb741f80 --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/07--set-collation.yaml @@ -0,0 +1,23 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + set -e + if [[ ${KUTTL_PG_VERSION} -ge 15 ]]; then + PRIMARY= + while [[ -z "${PRIMARY}" ]]; do + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=cluster-migrate, + postgres-operator.crunchydata.com/role=master' + ) + done + + # Ignore warnings about collation changes. This is DANGEROUS on real data! + # Only do this automatic step in test conditions; with real data, this may cause + # more problems as you may need to reindex. + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -c database \ + -- psql -qAt --command \ + 'ALTER DATABASE postgres REFRESH COLLATION VERSION; ALTER DATABASE template1 REFRESH COLLATION VERSION;' + fi diff --git a/testing/kuttl/e2e-other/cluster-migrate/08--alter-pv.yaml b/testing/kuttl/e2e-other/cluster-migrate/08--alter-pv.yaml new file mode 100644 index 0000000000..c5edfb4c99 --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/08--alter-pv.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + set -e + SAVED_DATA=$( + kubectl get configmap persistent-volume-reclaim-policy --namespace "${NAMESPACE}" \ + --output=jsonpath="{.data..['ORIGINAL_POLICY','VOLUME_NAME']}" + ) + + IFS=' ' + read ORIGINAL_POLICY VOLUME_NAME <<< "${SAVED_DATA}" + + kubectl patch pv "${VOLUME_NAME}" -p '{"spec":{"persistentVolumeReclaimPolicy":"'${ORIGINAL_POLICY}'"}}' + diff --git a/testing/kuttl/e2e-other/cluster-migrate/09--check-data.yaml b/testing/kuttl/e2e-other/cluster-migrate/09--check-data.yaml new file mode 100644 index 0000000000..6a46bd8e9a --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/09--check-data.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + set -e + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=cluster-migrate, + postgres-operator.crunchydata.com/role=master' + ) + + TREASURE=$( + kubectl exec "${PRIMARY}" --namespace "${NAMESPACE}" \ + --container database \ + -- psql -U postgres -qt -c "select data from important" + ) + + if [[ "${TREASURE}" != " treasure" ]]; then + echo "Migration from 3rd-party PG pod failed, result from query: ${TREASURE}" + exit 1 + fi diff --git a/testing/kuttl/e2e-other/cluster-migrate/README.md b/testing/kuttl/e2e-other/cluster-migrate/README.md new 
file mode 100644 index 0000000000..09026f9e8b --- /dev/null +++ b/testing/kuttl/e2e-other/cluster-migrate/README.md @@ -0,0 +1,45 @@ +## Cluster Migrate + +This test was developed to check that users can work around some known problems when +migrating from a non-Crunchy PostgreSQL image to a Crunchy PostgreSQL image: + +1) it changes the ownership of the data directory (which depends on fsGroup +behavior to change group ownership, a behavior that is not available in all providers); +2) it makes sure a postgresql.conf file is available, as required by Patroni. + +Important note on *environment*: +As noted above, this work relies on fsGroup, so this test will not work in the current +form in all environments. For instance, this test creates a PG cluster with fsGroup set, +which results in an error in OpenShift. + +Important note on *PV permissions*: +This test involves changing permissions on PersistentVolumes, which may not be available +in all environments to all users (since this is a cluster-wide permission). + +Important note on migrating between different builds of *Postgres 15*: +PG 15 introduced new behavior around database collation versions, which can result in errors like: + +``` +WARNING: database \"postgres\" has a collation version mismatch +DETAIL: The database was created using collation version 2.31, but the operating system provides version 2.28 +``` + +This error occurred in `reconcilePostgresDatabases` and prevented PGO from finishing the reconcile +loop. For _testing purposes_, this problem is worked around in steps 06 and 07, which wait for +the PG pod to be ready and then send a command to `REFRESH COLLATION VERSION` on the `postgres` +and `template1` databases (which were the only databases where this error was observed during +testing). + +This workaround is fine for testing purposes, but it should not be applied automatically in production. +User intervention and supervision are recommended in that case.
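For reference, a supervised manual intervention on a live cluster might look like the sketch below. The cluster name, label selector, and the databases to refresh are assumptions for illustration; review the mismatches (and reindex collation-dependent indexes where needed) before altering anything.

```bash
# Sketch only: inspect and refresh collation versions under supervision (PG 15+).
PRIMARY=$(kubectl get pod --namespace "${NAMESPACE}" --output name \
  --selector 'postgres-operator.crunchydata.com/cluster=cluster-migrate,postgres-operator.crunchydata.com/role=master')

# List the collation version recorded for each database; the server warnings show the mismatch.
kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -c database -- \
  psql -qAt -c 'SELECT datname, datcollversion FROM pg_database;'

# After review, refresh one database at a time.
kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -c database -- \
  psql -c 'ALTER DATABASE postgres REFRESH COLLATION VERSION;'
```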
+ +### Steps + +* 01: Create a non-Crunchy PostgreSQL cluster and wait for it to be ready +* 02: Create data on that cluster +* 03: Alter the Reclaim policy of the PV so that it will survive deletion of the cluster +* 04: Delete the original cluster, leaving the PV +* 05: Create a PGO-managed `postgrescluster` with the remaining PV as the datasource +* 06-07: Wait for the PG pod to be ready and alter the collation (PG 15 only, see above) +* 08: Alter the PV to the original Reclaim policy +* 09: Check that the data successfully migrated diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/10--cluster.yaml b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/10--cluster.yaml new file mode 100644 index 0000000000..a3236da358 --- /dev/null +++ b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/10--cluster.yaml @@ -0,0 +1,29 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-switchover-with-timestamp +spec: + postgresVersion: ${KUTTL_PG_VERSION} + patroni: + switchover: + enabled: true + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/10-assert.yaml b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/10-assert.yaml new file mode 100644 index 0000000000..d77e27e307 --- /dev/null +++ b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/10-assert.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-switchover-with-timestamp +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +# Patroni labels and readiness happen separately. +# The next step expects to find pods by their role label; wait for them here. +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp + postgres-operator.crunchydata.com/role: master +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp + postgres-operator.crunchydata.com/role: replica +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/11-annotate.yaml b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/11-annotate.yaml new file mode 100644 index 0000000000..844d5f1336 --- /dev/null +++ b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/11-annotate.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + # Label instance pods with their current role. These labels will stick around + # because switchover does not recreate any pods. 
+ - script: | + kubectl label --namespace="${NAMESPACE}" pods \ + --selector='postgres-operator.crunchydata.com/role=master' \ + 'testing/role-before=master' + - script: | + kubectl label --namespace="${NAMESPACE}" pods \ + --selector='postgres-operator.crunchydata.com/role=replica' \ + 'testing/role-before=replica' + + # Annotate the cluster to trigger a switchover. + - script: | + kubectl annotate --namespace="${NAMESPACE}" postgrescluster/delete-switchover-with-timestamp \ + "postgres-operator.crunchydata.com/trigger-switchover=$(date)" diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/12-assert.yaml b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/12-assert.yaml new file mode 100644 index 0000000000..76f0f8dff6 --- /dev/null +++ b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/12-assert.yaml @@ -0,0 +1,32 @@ +--- +# Wait for switchover to finish. A former replica should now be the primary. +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/role: master + testing/role-before: replica +--- +# The former primary should now be a replica. +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/role: replica + testing/role-before: master +--- +# All instances should be healthy. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-switchover-with-timestamp +status: + instances: + - name: instance1 + replicas: 2 + readyReplicas: 2 + updatedReplicas: 2 diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/13-delete-cluster-and-check.yaml b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/13-delete-cluster-and-check.yaml new file mode 100644 index 0000000000..45352cca2e --- /dev/null +++ b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/13-delete-cluster-and-check.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + # Get the names of the current primary and replica -- error if either is blank + # Delete the cluster + # Get the delete event for the pods + # Verify that the replica delete event is greater than the primary delete event + - script: | + PRIMARY=$( + kubectl get pods --namespace="${NAMESPACE}" \ + --selector='postgres-operator.crunchydata.com/role=master' \ + --output=jsonpath={.items..metadata.name} + ) + + REPLICA=$( + kubectl get pods --namespace="${NAMESPACE}" \ + --selector='postgres-operator.crunchydata.com/role=replica' \ + --output=jsonpath={.items..metadata.name} + ) + + echo "DELETE: Found primary ${PRIMARY} and replica ${REPLICA} pods" + + if [ -z "$PRIMARY" ]; then exit 1; fi + if [ -z "$REPLICA" ]; then exit 1; fi + + kubectl delete postgrescluster -n "${NAMESPACE}" delete-switchover-with-timestamp + + kubectl wait "pod/${REPLICA}" --namespace "${NAMESPACE}" --for=delete --timeout=180s + + KILLING_REPLICA_TIMESTAMP=$( + kubectl get events --namespace="${NAMESPACE}" \ + --field-selector reason="Killing",involvedObject.fieldPath="spec.containers{database}",involvedObject.name="${REPLICA}" \ + --output=jsonpath={.items..firstTimestamp} + ) + + kubectl wait "pod/${PRIMARY}" --namespace "${NAMESPACE}" --for=delete --timeout=180s + + KILLING_PRIMARY_TIMESTAMP=$( 
+ kubectl get events --namespace="${NAMESPACE}" \ + --field-selector reason="Killing",involvedObject.fieldPath="spec.containers{database}",involvedObject.name="${PRIMARY}" \ + --output=jsonpath={.items..firstTimestamp} + ) + + echo "DELETE: Found primary ${KILLING_PRIMARY_TIMESTAMP} and replica ${KILLING_REPLICA_TIMESTAMP} timestamps" + + if [[ "${KILLING_PRIMARY_TIMESTAMP}" < "${KILLING_REPLICA_TIMESTAMP}" ]]; then exit 1; fi diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/14-errors.yaml b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/14-errors.yaml new file mode 100644 index 0000000000..2a1015824b --- /dev/null +++ b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/14-errors.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-switchover-with-timestamp +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp +--- +# Patroni DCS objects are not owned by the PostgresCluster. +apiVersion: v1 +kind: Endpoints +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp +--- +apiVersion: v1 +kind: Service +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/README.md b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/README.md new file mode 100644 index 0000000000..bf914aa6cf --- /dev/null +++ b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/README.md @@ -0,0 +1,7 @@ +This test originally existed as the second test-case in the `delete` KUTTL test. +The test as written was prone to occasional flakes, sometimes due to missing events +(which were being used to check the timestamp of the container delete event). + +After discussion, we decided that this behavior (replica deleting before the primary) +was no longer required in v5, and the decision was made to sequester this test-case for +further testing and refinement. 
\ No newline at end of file diff --git a/testing/kuttl/e2e-other/exporter-append-custom-queries/00--create-cluster.yaml b/testing/kuttl/e2e-other/exporter-append-custom-queries/00--create-cluster.yaml new file mode 100644 index 0000000000..bc515e3534 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-append-custom-queries/00--create-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/exporter-append-queries-configmap.yaml +- files/exporter-append-queries-cluster.yaml +assert: +- files/exporter-append-queries-cluster-checks.yaml diff --git a/testing/kuttl/e2e-other/exporter-append-custom-queries/00-assert.yaml b/testing/kuttl/e2e-other/exporter-append-custom-queries/00-assert.yaml new file mode 100644 index 0000000000..2655841597 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-append-custom-queries/00-assert.yaml @@ -0,0 +1,50 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# First, check that all containers in the instance pod are ready +# Then, list the query files mounted to the exporter and check for expected files +# Finally, check the contents of the queries to ensure queries.yml was generated correctly +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=exporter-append-queries \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + condition_json=$(kubectl get "${pod}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } + + queries_files=$( + kubectl exec --namespace "${NAMESPACE}" "${pod}" -c exporter \ + -- ls /conf + ) + + { + contains "${queries_files}" "queries.yml" && + contains "${queries_files}" "defaultQueries.yml" + } || { + echo >&2 'The /conf directory should contain queries.yml and defaultQueries.yml. Instead it has:' + echo "${queries_files}" + exit 1 + } + + master_queries_contents=$( + kubectl exec --namespace "${NAMESPACE}" "${pod}" -c exporter \ + -- cat /tmp/queries.yml + ) + + { + contains "${master_queries_contents}" "# This is a test." && + contains "${master_queries_contents}" "ccp_postgresql_version" + } || { + echo >&2 'The master queries.yml file should contain the contents of both defaultQueries.yml and the custom queries.yml file. Instead it contains:' + echo "${master_queries_contents}" + exit 1 + } diff --git a/testing/kuttl/e2e-other/exporter-append-custom-queries/README.md b/testing/kuttl/e2e-other/exporter-append-custom-queries/README.md new file mode 100644 index 0000000000..a24aa444c7 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-append-custom-queries/README.md @@ -0,0 +1,5 @@ +Exporter - AppendCustomQueries Enabled + +Note: This series of tests depends on PGO being deployed with the AppendCustomQueries feature gate ON. There is a separate set of tests in e2e that tests exporter functionality without the AppendCustomQueries feature. + +When running this test, make sure that the PGO_FEATURE_GATES environment variable is set to "AppendCustomQueries=true" on the PGO Deployment. 
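One way to do that on an existing installation is sketched below; the Deployment name (`pgo`) and namespace (`postgres-operator`) are assumptions, so substitute the values from your environment.

```bash
# Enable the AppendCustomQueries feature gate on the PGO Deployment.
kubectl set env deployment/pgo --namespace postgres-operator \
  PGO_FEATURE_GATES="AppendCustomQueries=true"

# Print the operator container's env and confirm PGO_FEATURE_GATES includes
# AppendCustomQueries=true before running this test series.
kubectl get deployment/pgo --namespace postgres-operator \
  --output jsonpath='{.spec.template.spec.containers[0].env}'
```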
diff --git a/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster-checks.yaml b/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster-checks.yaml new file mode 100644 index 0000000000..459356ddfc --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster-checks.yaml @@ -0,0 +1,29 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-append-queries +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-append-queries + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-append-queries-exporter-queries-config +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-queries-test diff --git a/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster.yaml b/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster.yaml new file mode 100644 index 0000000000..c4f75771aa --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster.yaml @@ -0,0 +1,21 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-append-queries +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: + configuration: + - configMap: + name: custom-queries-test diff --git a/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-configmap.yaml b/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-configmap.yaml new file mode 100644 index 0000000000..9964d6bc1e --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-configmap.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-queries-test +data: + queries.yml: "# This is a test." 
diff --git a/testing/kuttl/e2e-other/exporter-replica/00--create-cluster.yaml b/testing/kuttl/e2e-other/exporter-replica/00--create-cluster.yaml new file mode 100644 index 0000000000..2abec0814e --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-replica/00--create-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/exporter-replica-cluster.yaml +assert: +- files/exporter-replica-cluster-checks.yaml diff --git a/testing/kuttl/e2e-other/exporter-replica/00-assert.yaml b/testing/kuttl/e2e-other/exporter-replica/00-assert.yaml new file mode 100644 index 0000000000..280be2d395 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-replica/00-assert.yaml @@ -0,0 +1,45 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# First, check that all containers in the instance(s) pod are ready +# Then, grab the exporter metrics output and check that there were no scrape errors +# Finally, ensure the monitoring user exists and is configured +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + replica=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=exporter-replica \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true \ + -l postgres-operator.crunchydata.com/role=replica) + [ "$replica" = "" ] && retry "Replica Pod not found" && exit 1 + + replica_condition_json=$(kubectl get "${replica}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}") + [ "$replica_condition_json" = "" ] && retry "Replica conditions not found" && exit 1 + { + check_containers_ready "$replica_condition_json" + } || { + retry "containers not ready" + exit 1 + } + + scrape_metrics=$(kubectl exec ${replica} -c exporter -n ${NAMESPACE} -- \ + curl --silent http://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") + { + contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; + } || { + retry "${scrape_metrics}" + exit 1 + } + + kubectl exec --stdin "${replica}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + DECLARE + result record; + BEGIN + SELECT * INTO result FROM pg_catalog.pg_roles WHERE rolname = 'ccp_monitoring'; + ASSERT FOUND, 'user not found'; + END $$ + SQL diff --git a/testing/kuttl/e2e-other/exporter-replica/files/exporter-replica-cluster-checks.yaml b/testing/kuttl/e2e-other/exporter-replica/files/exporter-replica-cluster-checks.yaml new file mode 100644 index 0000000000..7c775b47b1 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-replica/files/exporter-replica-cluster-checks.yaml @@ -0,0 +1,24 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-replica +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-replica + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-replica-exporter-queries-config diff --git a/testing/kuttl/e2e-other/exporter-replica/files/exporter-replica-cluster.yaml b/testing/kuttl/e2e-other/exporter-replica/files/exporter-replica-cluster.yaml new file mode 100644 index 
0000000000..504d33bc3a --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-replica/files/exporter-replica-cluster.yaml @@ -0,0 +1,19 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-replica +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: {} diff --git a/testing/kuttl/e2e-other/exporter-standby/00--create-certs.yaml b/testing/kuttl/e2e-other/exporter-standby/00--create-certs.yaml new file mode 100644 index 0000000000..9c9cd140ac --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/00--create-certs.yaml @@ -0,0 +1,4 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/cluster-certs.yaml diff --git a/testing/kuttl/e2e-other/exporter-standby/01--create-primary.yaml b/testing/kuttl/e2e-other/exporter-standby/01--create-primary.yaml new file mode 100644 index 0000000000..6b5b721d4e --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/01--create-primary.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/primary-cluster.yaml +assert: +- files/primary-cluster-checks.yaml diff --git a/testing/kuttl/e2e-other/exporter-standby/01-assert.yaml b/testing/kuttl/e2e-other/exporter-standby/01-assert.yaml new file mode 100644 index 0000000000..cd2d16c783 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/01-assert.yaml @@ -0,0 +1,22 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# Store the exporter pid as an annotation on the pod +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + + pod=$(kubectl get pods -o name -n $NAMESPACE \ + -l postgres-operator.crunchydata.com/cluster=primary-cluster \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + condition_json=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } + + pid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + kubectl annotate --overwrite -n ${NAMESPACE} ${pod} oldpid=${pid} diff --git a/testing/kuttl/e2e-other/exporter-standby/02--set-primary-password.yaml b/testing/kuttl/e2e-other/exporter-standby/02--set-primary-password.yaml new file mode 100644 index 0000000000..4e613a277f --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/02--set-primary-password.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/update-primary-password.yaml +assert: +- files/update-primary-password-checks.yaml diff --git a/testing/kuttl/e2e-other/exporter-standby/03--create-standby.yaml b/testing/kuttl/e2e-other/exporter-standby/03--create-standby.yaml new file mode 100644 index 0000000000..fa2e653353 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/03--create-standby.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/standby-cluster.yaml +assert: 
+- files/standby-cluster-checks.yaml diff --git a/testing/kuttl/e2e-other/exporter-standby/03-assert.yaml b/testing/kuttl/e2e-other/exporter-standby/03-assert.yaml new file mode 100644 index 0000000000..327e5562fa --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/03-assert.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# Grab the exporter pod +# Check that the postgres_exporter pid is running +# Store the exporter pid as an annotation on the pod +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + + pod=$(kubectl get pods -o name -n $NAMESPACE \ + -l postgres-operator.crunchydata.com/cluster=standby-cluster,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + pid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + kubectl annotate --overwrite -n ${NAMESPACE} ${pod} oldpid=${pid} diff --git a/testing/kuttl/e2e-other/exporter-standby/04--set-standby-password.yaml b/testing/kuttl/e2e-other/exporter-standby/04--set-standby-password.yaml new file mode 100644 index 0000000000..18c98e423e --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/04--set-standby-password.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/update-standby-password.yaml +assert: +- files/update-standby-password-checks.yaml diff --git a/testing/kuttl/e2e-other/exporter-standby/04-assert.yaml b/testing/kuttl/e2e-other/exporter-standby/04-assert.yaml new file mode 100644 index 0000000000..7e77784a65 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/04-assert.yaml @@ -0,0 +1,38 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# Grab the exporter pod +# Check that the postgres_exporter pid is running +# Store the exporter pid as an annotation on the pod +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + check_containers_ready() { bash -ceu ' echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@";} + + pod=$(kubectl get pods -o name -n $NAMESPACE \ + -l postgres-operator.crunchydata.com/cluster=standby-cluster,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + oldPid=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.metadata.annotations.oldpid}") + newPid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + [ "${oldPid}" -eq "${newPid}" ] && retry "pid should have changed" && exit 1 + + password=$(kubectl exec -n ${NAMESPACE} ${pod} -c exporter -- bash -c 'cat /opt/crunchy/password') + { contains "${password}" "password"; } || { + retry "unexpected password: ${password}" + exit 1 + } + + condition_json=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } + + scrape_metrics=$(kubectl exec ${pod} -c exporter -n ${NAMESPACE} -- \ + curl --silent http://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") + { contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; } || { + retry "${scrape_metrics}" + exit 1 + } diff 
--git a/testing/kuttl/e2e-other/exporter-standby/README.md b/testing/kuttl/e2e-other/exporter-standby/README.md new file mode 100644 index 0000000000..34df4e5b7a --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/README.md @@ -0,0 +1,9 @@ +# Exporter connection on standby cluster + +The exporter standby test will deploy two clusters, one primary and one standby. +Both clusters have monitoring enabled and are created in the same namespace to +allow for easy connections over the network. + +The `ccp_monitoring` password for both clusters are updated to match allowing +the exporter on the standby cluster to query postgres using the proper `ccp_monitoring` +password. diff --git a/testing/kuttl/e2e-other/exporter-standby/files/cluster-certs.yaml b/testing/kuttl/e2e-other/exporter-standby/files/cluster-certs.yaml new file mode 100644 index 0000000000..1f8dd06ccf --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/files/cluster-certs.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +data: + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnakNDQVNlZ0F3SUJBZ0lRZUpacWMxMmR3TDh6cDNRVjZVMzg0ekFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNekEwTVRFeE56UTFNemhhRncwegpNekEwTURneE9EUTFNemhhTUI4eEhUQWJCZ05WQkFNVEZIQnZjM1JuY21WekxXOXdaWEpoZEc5eUxXTmhNRmt3CkV3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFWEZwMU1nOFQ0aWxFRFlleVh4Nm5hRU0weEtNUStNZU0KWnM3dUtockdmTnY1cVd3N0puNzJEMEZNWE9raVNTN1BsZUhtN1lwYk1lelZ4UytjLzV6a2NLTkZNRU13RGdZRApWUjBQQVFIL0JBUURBZ0VHTUJJR0ExVWRFd0VCL3dRSU1BWUJBZjhDQVFBd0hRWURWUjBPQkJZRUZGU2JSZzdXCnpIZFdIODN2aEtTcld3dGV4K2FtTUFvR0NDcUdTTTQ5QkFNREEwa0FNRVlDSVFDK3pXTHh4bmpna1ZYYzBFOVAKbWlmZm9jeTIrM3AxREZMUkJRcHlZNFE0RVFJaEFPSDhQVEtvWnRZUWlobVlqTkd3Q1J3aTgvVFRaYWIxSnVIMAo2YnpodHZobgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNQakNDQWVXZ0F3SUJBZ0lSQU93NURHaGVVZnVNY25KYVdKNkllall3Q2dZSUtvWkl6ajBFQXdNd0h6RWQKTUJzR0ExVUVBeE1VY0c5emRHZHlaWE10YjNCbGNtRjBiM0l0WTJFd0hoY05Nak13TkRFeE1UYzBOVE01V2hjTgpNek13TkRBNE1UZzBOVE01V2pBOU1Uc3dPUVlEVlFRREV6SndjbWx0WVhKNUxXTnNkWE4wWlhJdGNISnBiV0Z5CmVTNWtaV1poZFd4MExuTjJZeTVqYkhWemRHVnlMbXh2WTJGc0xqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDkKQXdFSEEwSUFCT3RlNytQWFlDci9RQVJkcHlwYTFHcEpkbW5wOFN3ZG9FOTIzUXoraWt4UllTalgwUHBXcytqUQpVNXlKZ0NDdGxyZmxFZVZ4S2YzaVpiVHdadFlIaHVxamdlTXdnZUF3RGdZRFZSMFBBUUgvQkFRREFnV2dNQXdHCkExVWRFd0VCL3dRQ01BQXdId1lEVlIwakJCZ3dGb0FVVkp0R0R0Yk1kMVlmemUrRXBLdGJDMTdINXFZd2daNEcKQTFVZEVRU0JsakNCazRJeWNISnBiV0Z5ZVMxamJIVnpkR1Z5TFhCeWFXMWhjbmt1WkdWbVlYVnNkQzV6ZG1NdQpZMngxYzNSbGNpNXNiMk5oYkM2Q0kzQnlhVzFoY25rdFkyeDFjM1JsY2kxd2NtbHRZWEo1TG1SbFptRjFiSFF1CmMzWmpnaDl3Y21sdFlYSjVMV05zZFhOMFpYSXRjSEpwYldGeWVTNWtaV1poZFd4MGdoZHdjbWx0WVhKNUxXTnMKZFhOMFpYSXRjSEpwYldGeWVUQUtCZ2dxaGtqT1BRUURBd05IQURCRUFpQjA3Q3YzRHJTNXUxRFdaek1MQjdvbAppcjFFWEpQTnFaOXZWQUF5ZTdDMGJRSWdWQVlDM2F0ekl4a0syNHlQUU1TSjU1OGFaN3JEdkZGZXdOaVpmdSt0CjdETT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUoxYkNXMTByR3o2VWQ1K2R3WmZWcGNUNFlqck9XVG1iVW9XNXRxYTA2b1ZvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFNjE3djQ5ZGdLdjlBQkYybktsclVha2wyYWVueExCMmdUM2JkRFA2S1RGRmhLTmZRK2xhego2TkJUbkltQUlLMld0K1VSNVhFcC9lSmx0UEJtMWdlRzZnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= +kind: Secret +metadata: + name: cluster-cert +type: Opaque +--- +apiVersion: v1 +data: + ca.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnakNDQVNlZ0F3SUJBZ0lRZUpacWMxMmR3TDh6cDNRVjZVMzg0ekFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNekEwTVRFeE56UTFNemhhRncwegpNekEwTURneE9EUTFNemhhTUI4eEhUQWJCZ05WQkFNVEZIQnZjM1JuY21WekxXOXdaWEpoZEc5eUxXTmhNRmt3CkV3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFWEZwMU1nOFQ0aWxFRFlleVh4Nm5hRU0weEtNUStNZU0KWnM3dUtockdmTnY1cVd3N0puNzJEMEZNWE9raVNTN1BsZUhtN1lwYk1lelZ4UytjLzV6a2NLTkZNRU13RGdZRApWUjBQQVFIL0JBUURBZ0VHTUJJR0ExVWRFd0VCL3dRSU1BWUJBZjhDQVFBd0hRWURWUjBPQkJZRUZGU2JSZzdXCnpIZFdIODN2aEtTcld3dGV4K2FtTUFvR0NDcUdTTTQ5QkFNREEwa0FNRVlDSVFDK3pXTHh4bmpna1ZYYzBFOVAKbWlmZm9jeTIrM3AxREZMUkJRcHlZNFE0RVFJaEFPSDhQVEtvWnRZUWlobVlqTkd3Q1J3aTgvVFRaYWIxSnVIMAo2YnpodHZobgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJqekNDQVRTZ0F3SUJBZ0lRRzA0MEprWjYwZkZtanpaVG1SekhyakFLQmdncWhrak9QUVFEQXpBZk1SMHcKR3dZRFZRUURFeFJ3YjNOMFozSmxjeTF2Y0dWeVlYUnZjaTFqWVRBZUZ3MHlNekEwTVRFeE56UTFNemhhRncwegpNekEwTURneE9EUTFNemhhTUJjeEZUQVRCZ05WQkFNTURGOWpjblZ1WTJoNWNtVndiREJaTUJNR0J5cUdTTTQ5CkFnRUdDQ3FHU000OUF3RUhBMElBQk5HVHcvSmVtaGxGK28xUlRBb0VXSndzdjJ6WjIyc1p4N2NjT2VmL1NXdjYKeXphYkpaUmkvREFyK0kwUHNyTlhmand3a0xMa3hERGZsTklvcFZMNVYwT2pXakJZTUE0R0ExVWREd0VCL3dRRQpBd0lGb0RBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkZTYlJnN1d6SGRXSDgzdmhLU3JXd3RlCngrYW1NQmNHQTFVZEVRUVFNQTZDREY5amNuVnVZMmg1Y21Wd2JEQUtCZ2dxaGtqT1BRUURBd05KQURCR0FpRUEKcWVsYmUvdTQzRFRPWFdlell1b3Nva0dUbHg1U2ljUFRkNk05Q3pwU2VoWUNJUUNOOS91Znc0SUZzdDZOM1RtYQo4MmZpSElKSUpQY0RjM2ZKUnFna01RQmF0QT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSVBxeTVzNVJxWThKUmdycjJreE9zaG9hc25yTWhUUkJPYjZ0alI3T2ZqTFlvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFMFpQRDhsNmFHVVg2alZGTUNnUlluQ3kvYk5uYmF4bkh0eHc1NS85SmEvckxOcHNsbEdMOApNQ3Y0alEreXMxZCtQRENRc3VURU1OK1UwaWlsVXZsWFF3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= +kind: Secret +metadata: + name: replication-cert +type: Opaque diff --git a/testing/kuttl/e2e-other/exporter-standby/files/primary-cluster-checks.yaml b/testing/kuttl/e2e-other/exporter-standby/files/primary-cluster-checks.yaml new file mode 100644 index 0000000000..c2a59244a5 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/files/primary-cluster-checks.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: primary-cluster +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: primary-cluster + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running diff --git a/testing/kuttl/e2e-other/exporter-standby/files/primary-cluster.yaml b/testing/kuttl/e2e-other/exporter-standby/files/primary-cluster.yaml new file mode 100644 index 0000000000..8f51632f5b --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/files/primary-cluster.yaml @@ -0,0 +1,22 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: primary-cluster +spec: + postgresVersion: ${KUTTL_PG_VERSION} + customTLSSecret: + name: cluster-cert + customReplicationTLSSecret: + name: replication-cert + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: 
{ storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: {} diff --git a/testing/kuttl/e2e-other/exporter-standby/files/standby-cluster-checks.yaml b/testing/kuttl/e2e-other/exporter-standby/files/standby-cluster-checks.yaml new file mode 100644 index 0000000000..237dec721e --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/files/standby-cluster-checks.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: standby-cluster +status: + instances: + - name: instance1 + replicas: 1 + updatedReplicas: 1 + # The cluster should not become fully ready in this step, the ccp_monitoring password + # on the standby does not match the primary +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: standby-cluster + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running diff --git a/testing/kuttl/e2e-other/exporter-standby/files/standby-cluster.yaml b/testing/kuttl/e2e-other/exporter-standby/files/standby-cluster.yaml new file mode 100644 index 0000000000..33e9ec2c2c --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/files/standby-cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: standby-cluster +spec: + postgresVersion: ${KUTTL_PG_VERSION} + standby: + enabled: true + host: primary-cluster-primary + customTLSSecret: + name: cluster-cert + customReplicationTLSSecret: + name: replication-cert + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: {} diff --git a/testing/kuttl/e2e-other/exporter-standby/files/update-primary-password-checks.yaml b/testing/kuttl/e2e-other/exporter-standby/files/update-primary-password-checks.yaml new file mode 100644 index 0000000000..1ef72b49c9 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/files/update-primary-password-checks.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Secret +metadata: + name: primary-cluster-monitoring + labels: + postgres-operator.crunchydata.com/cluster: primary-cluster + postgres-operator.crunchydata.com/role: monitoring + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PostgresCluster + name: primary-cluster +data: + # ensure the password is encoded to 'password' + password: cGFzc3dvcmQ= +--- +# TODO: Check that password is set as a file diff --git a/testing/kuttl/e2e-other/exporter-standby/files/update-primary-password.yaml b/testing/kuttl/e2e-other/exporter-standby/files/update-primary-password.yaml new file mode 100644 index 0000000000..a66450b103 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/files/update-primary-password.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: primary-cluster-monitoring + labels: + postgres-operator.crunchydata.com/cluster: primary-cluster + postgres-operator.crunchydata.com/role: monitoring +stringData: + password: password +data: +# Ensure data field is deleted so that password/verifier will be regenerated diff --git a/testing/kuttl/e2e-other/exporter-standby/files/update-standby-password-checks.yaml 
b/testing/kuttl/e2e-other/exporter-standby/files/update-standby-password-checks.yaml new file mode 100644 index 0000000000..34d5357318 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/files/update-standby-password-checks.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Secret +metadata: + name: standby-cluster-monitoring + labels: + postgres-operator.crunchydata.com/cluster: standby-cluster + postgres-operator.crunchydata.com/role: monitoring + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PostgresCluster + name: standby-cluster +data: + # ensure the password is encoded to 'password' + password: cGFzc3dvcmQ= +--- +# TODO: Check that password is set as a file diff --git a/testing/kuttl/e2e-other/exporter-standby/files/update-standby-password.yaml b/testing/kuttl/e2e-other/exporter-standby/files/update-standby-password.yaml new file mode 100644 index 0000000000..57371fce93 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-standby/files/update-standby-password.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: standby-cluster-monitoring + labels: + postgres-operator.crunchydata.com/cluster: standby-cluster + postgres-operator.crunchydata.com/role: monitoring +stringData: + password: password +data: +# Ensure data field is deleted so that password/verifier will be regenerated diff --git a/testing/kuttl/e2e-other/exporter-upgrade/00--cluster.yaml b/testing/kuttl/e2e-other/exporter-upgrade/00--cluster.yaml new file mode 100644 index 0000000000..0e53eab2de --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-upgrade/00--cluster.yaml @@ -0,0 +1,30 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter +spec: + postgresVersion: 14 + image: us.gcr.io/container-suite/crunchy-postgres:ubi8-14.0-5.0.3-0 + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + monitoring: + pgmonitor: + exporter: + image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-5.3.1-0 diff --git a/testing/kuttl/e2e-other/exporter-upgrade/00-assert.yaml b/testing/kuttl/e2e-other/exporter-upgrade/00-assert.yaml new file mode 100644 index 0000000000..c569c97454 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-upgrade/00-assert.yaml @@ -0,0 +1,10 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/kuttl/e2e-other/exporter-upgrade/01--check-exporter.yaml b/testing/kuttl/e2e-other/exporter-upgrade/01--check-exporter.yaml new file mode 100644 index 0000000000..0e72f2a0bf --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-upgrade/01--check-exporter.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + set -e + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=exporter, + postgres-operator.crunchydata.com/role=master' + ) + + # Ensure that the metrics endpoint is available from inside the exporter container + for i in {1..5}; do + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -c exporter -- curl 
http://localhost:9187/metrics + sleep 2 + done + + # Ensure that the monitoring user exists and is configured. + kubectl exec --stdin --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + DECLARE + result record; + BEGIN + SELECT * INTO result FROM pg_catalog.pg_roles WHERE rolname = 'ccp_monitoring'; + ASSERT FOUND, 'user not found'; + ASSERT result.rolconfig @> '{jit=off}', format('got config: %L', result.rolconfig); + END $$ + SQL diff --git a/testing/kuttl/e2e-other/exporter-upgrade/02--update-cluster.yaml b/testing/kuttl/e2e-other/exporter-upgrade/02--update-cluster.yaml new file mode 100644 index 0000000000..cde17d80b4 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-upgrade/02--update-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter +spec: + postgresVersion: 14 + image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.5-1 diff --git a/testing/kuttl/e2e-other/exporter-upgrade/02-assert.yaml b/testing/kuttl/e2e-other/exporter-upgrade/02-assert.yaml new file mode 100644 index 0000000000..9ad238b944 --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-upgrade/02-assert.yaml @@ -0,0 +1,24 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: exporter-primary diff --git a/testing/kuttl/e2e-other/exporter-upgrade/03--check-exporter.yaml b/testing/kuttl/e2e-other/exporter-upgrade/03--check-exporter.yaml new file mode 100644 index 0000000000..8161e463fc --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-upgrade/03--check-exporter.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=exporter, + postgres-operator.crunchydata.com/role=master' + ) + + # Get errors from the exporter + # See the README.md for a discussion of these errors + ERR=$(kubectl logs --namespace "${NAMESPACE}" "${PRIMARY}" -c exporter | grep -e "Error running query on database") + ERR_COUNT=$(echo "$ERR" | wc -l) + + if [[ "$ERR_COUNT" -gt 2 ]]; then + echo "Errors in log from exporter: ${ERR}" + exit 1 + fi diff --git a/testing/kuttl/e2e-other/exporter-upgrade/README.md b/testing/kuttl/e2e-other/exporter-upgrade/README.md new file mode 100644 index 0000000000..fefe28a95c --- /dev/null +++ b/testing/kuttl/e2e-other/exporter-upgrade/README.md @@ -0,0 +1,31 @@ +The exporter-upgrade test makes sure that PGO updates an extension used for monitoring. This +avoids an error where a user might update to a new PG image with a newer extension, but with an +older extension operative. + +Note: This test relies on two `crunchy-postgres` images with known, different `pgnodemx` extensions: +the image created in 00--cluster.yaml has `pgnodemx` 1.1; the image we update the cluster to in +02--update-cluster.yaml has `pgnodemx` 1.3. + +00-01 +This starts up a cluster with a purposely outdated `pgnodemx` extension. 
Because we want a specific +extension, the image used here is hard-coded (and so outdated it's not publicly available). + +(This image is so outdated that it doesn't finish creating a backup with the current PGO, which is +why the 00-assert.yaml only checks that the pod is ready; and why 01--check-exporter.yaml wraps the +call in a retry loop.) + +02-03 +The cluster is updated with a newer (and hardcoded) image with a newer version of `pgnodemx`. Due +to the change made in https://github.com/CrunchyData/postgres-operator/pull/3400, this should no +longer produce multiple errors. + +Note: a few errors may be logged after the `exporter` container attempts to run the `pgnodemx` +functions but before the extension is updated. So this checks that there are no more than 2 errors, +since that was the observed maximum number of printed errors during manual tests of the check. + +For instance, using these hardcoded images (with `pgnodemx` versions 1.1 and 1.3), those errors were: + +``` +Error running query on database \"localhost:5432\": ccp_nodemx_disk_activity pq: query-specified return tuple and function return type are not compatible" +Error running query on database \"localhost:5432\": ccp_nodemx_data_disk pq: query-specified return tuple and function return type are not compatible +``` diff --git a/testing/kuttl/e2e-other/gssapi/00-assert.yaml b/testing/kuttl/e2e-other/gssapi/00-assert.yaml new file mode 100644 index 0000000000..ea828be0c4 --- /dev/null +++ b/testing/kuttl/e2e-other/gssapi/00-assert.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: krb5 +--- +apiVersion: v1 +kind: Secret +metadata: + name: krb5-keytab diff --git a/testing/kuttl/e2e-other/gssapi/00-krb5-keytab.yaml b/testing/kuttl/e2e-other/gssapi/00-krb5-keytab.yaml new file mode 100644 index 0000000000..6311193d55 --- /dev/null +++ b/testing/kuttl/e2e-other/gssapi/00-krb5-keytab.yaml @@ -0,0 +1,4 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- command: kubectl exec -n krb5 -it krb5-kdc-0 -- /krb5-scripts/krb5.sh "${NAMESPACE}" diff --git a/testing/kuttl/e2e-other/gssapi/01-assert.yaml b/testing/kuttl/e2e-other/gssapi/01-assert.yaml new file mode 100644 index 0000000000..dbda953ead --- /dev/null +++ b/testing/kuttl/e2e-other/gssapi/01-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: gssapi +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: gssapi-primary diff --git a/testing/kuttl/e2e-other/gssapi/01-cluster.yaml b/testing/kuttl/e2e-other/gssapi/01-cluster.yaml new file mode 100644 index 0000000000..8acfe46c4d --- /dev/null +++ b/testing/kuttl/e2e-other/gssapi/01-cluster.yaml @@ -0,0 +1,41 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: gssapi +spec: + config: + files: + - secret: + name: krb5-keytab + - configMap: + name: krb5 + patroni: + dynamicConfiguration: + postgresql: + pg_hba: + - host postgres postgres 0.0.0.0/0 scram-sha-256 + - host all krb5hippo@PGO.CRUNCHYDATA.COM 0.0.0.0/0 gss + parameters: + krb_server_keyfile: /etc/postgres/krb5.keytab + users: + - name: postgres + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + 
resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e-other/gssapi/02-assert.yaml b/testing/kuttl/e2e-other/gssapi/02-assert.yaml new file mode 100644 index 0000000000..36f85d95d4 --- /dev/null +++ b/testing/kuttl/e2e-other/gssapi/02-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-gssapi +status: + succeeded: 1 diff --git a/testing/kuttl/e2e-other/gssapi/02-psql-connect.yaml b/testing/kuttl/e2e-other/gssapi/02-psql-connect.yaml new file mode 100644 index 0000000000..30f02b3b19 --- /dev/null +++ b/testing/kuttl/e2e-other/gssapi/02-psql-connect.yaml @@ -0,0 +1,47 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-gssapi +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - bash + - -c + - -- + - |- + psql -c 'create user "krb5hippo@PGO.CRUNCHYDATA.COM";' + kinit -k -t /krb5-conf/krb5.keytab krb5hippo@PGO.CRUNCHYDATA.COM + psql -U krb5hippo@PGO.CRUNCHYDATA.COM -h gssapi-primary.$(NAMESPACE).svc.cluster.local -d postgres \ + -c 'select version();' + env: + - name: NAMESPACE + valueFrom: { fieldRef: { fieldPath: metadata.namespace } } + - name: PGHOST + valueFrom: { secretKeyRef: { name: gssapi-pguser-postgres, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: gssapi-pguser-postgres, key: port } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: gssapi-pguser-postgres, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: gssapi-pguser-postgres, key: password } } + - name: PGDATABASE + value: postgres + - name: KRB5_CONFIG + value: /krb5-conf/krb5.conf + volumeMounts: + - name: krb5-conf + mountPath: /krb5-conf + volumes: + - name: krb5-conf + projected: + sources: + - configMap: + name: krb5 + - secret: + name: krb5-keytab diff --git a/testing/kuttl/e2e-other/gssapi/README.md b/testing/kuttl/e2e-other/gssapi/README.md new file mode 100644 index 0000000000..72d8d2b997 --- /dev/null +++ b/testing/kuttl/e2e-other/gssapi/README.md @@ -0,0 +1,14 @@ +# GSSAPI Authentication + +This test verifies that it is possible to properly configure PostgreSQL for GSSAPI +authentication. This is done by configuring a PostgresCluster for GSSAPI authentication, +and then utilizing a Kerberos ticket that has been issued by a Kerberos KDC server to log into +PostgreSQL. 
+ +## Assumptions + +- A Kerberos Key Distribution Center (KDC) Pod named `krb5-kdc-0` is deployed inside of a `krb5` +namespace within the Kubernetes cluster +- The KDC server (`krb5-kdc-0`) contains a `/krb5-conf/krb5.sh` script that can be run as part +of the test to create the Kerberos principals, keytab secret and client configuration needed to +successfully run the test diff --git a/testing/kuttl/e2e-other/postgis-cluster/00--cluster.yaml b/testing/kuttl/e2e-other/postgis-cluster/00--cluster.yaml new file mode 100644 index 0000000000..8dc88788bc --- /dev/null +++ b/testing/kuttl/e2e-other/postgis-cluster/00--cluster.yaml @@ -0,0 +1,26 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: postgis +spec: + postgresVersion: ${KUTTL_PG_VERSION} + postGISVersion: "${KUTTL_POSTGIS_VERSION}" + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e-other/postgis-cluster/00-assert.yaml b/testing/kuttl/e2e-other/postgis-cluster/00-assert.yaml new file mode 100644 index 0000000000..b0bda7753f --- /dev/null +++ b/testing/kuttl/e2e-other/postgis-cluster/00-assert.yaml @@ -0,0 +1,24 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: postgis +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: postgis + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: postgis-primary diff --git a/testing/kuttl/e2e-other/postgis-cluster/01--psql-connect.yaml b/testing/kuttl/e2e-other/postgis-cluster/01--psql-connect.yaml new file mode 100644 index 0000000000..814958a9f6 --- /dev/null +++ b/testing/kuttl/e2e-other/postgis-cluster/01--psql-connect.yaml @@ -0,0 +1,132 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-postgis-connect +spec: + backoffLimit: 6 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: password } } + - { name: GIS_VERSION, value: "${KUTTL_POSTGIS_VERSION}" } + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } + command: + - bash + - -c + - | + # Ensure PostGIS version is set + GIS_VERSION=${KUTTL_POSTGIS_VERSION} + GIS_VERSION=${GIS_VERSION:-notset} + + # check version + RESULT=$(psql -c "DO \$\$ + DECLARE + result boolean; + BEGIN + SELECT postgis_version() LIKE '%${GIS_VERSION}%' INTO result; + ASSERT result = 't', 'PostGIS version incorrect'; + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi + + # check full version + RESULT=$(psql -c "DO \$\$ + DECLARE + result boolean; + BEGIN + SELECT postgis_full_version() LIKE 'POSTGIS=\"%${GIS_VERSION}%' INTO result; + ASSERT result = 't', 'PostGIS full version incorrect'; + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi + + # check expected schemas (tiger, tiger_data and topology) + # - https://www.postgresql.org/docs/current/catalog-pg-namespace.html + RESULT=$(psql -c "DO \$\$ + DECLARE + result text; + BEGIN + SELECT nspname FROM pg_catalog.pg_namespace WHERE nspname='tiger' INTO result; + ASSERT result = 'tiger', 'PostGIS tiger schema missing'; + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi + + RESULT=$(psql -c "DO \$\$ + DECLARE + result text; + BEGIN + SELECT nspname FROM pg_catalog.pg_namespace WHERE nspname='tiger_data' INTO result; + ASSERT result = 'tiger_data', 'PostGIS tiger_data schema missing'; + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi + + RESULT=$(psql -c "DO \$\$ + DECLARE + result text; + BEGIN + SELECT nspname FROM pg_catalog.pg_namespace WHERE nspname='topology' INTO result; + ASSERT result = 'topology', 'PostGIS topology schema missing'; + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi + + # check point creation + RESULT=$(psql -c "DO \$\$ + DECLARE + result text; + BEGIN + SELECT pg_typeof(ST_MakePoint(28.385200,-81.563900)) INTO result; + ASSERT result = 'geometry', 'Unable to create PostGIS point'; + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi + + # check GeoJSON function + RESULT=$(psql -c "DO \$\$ + DECLARE + result text; + BEGIN + SELECT ST_AsGeoJSON('SRID=4326;POINT(-118.4079 33.9434)'::geography) INTO result; + ASSERT result = '{\"type\":\"Point\",\"coordinates\":[-118.4079,33.9434]}', FORMAT('GeoJSON check failed, got %L', result); + END \$\$;" 2>&1) + + if [[ "$RESULT" == *"ERROR"* ]]; then + echo "$RESULT" + exit 1 + fi diff --git a/testing/kuttl/e2e-other/postgis-cluster/01-assert.yaml b/testing/kuttl/e2e-other/postgis-cluster/01-assert.yaml new file mode 100644 index 0000000000..22e9e6f9de --- /dev/null +++ b/testing/kuttl/e2e-other/postgis-cluster/01-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-postgis-connect +status: + succeeded: 1 diff --git a/testing/kuttl/e2e-other/replica-service/00-base-cluster.yaml b/testing/kuttl/e2e-other/replica-service/00-base-cluster.yaml new file mode 100644 index 0000000000..725f40de14 --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/00-base-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/base-cluster.yaml +assert: +- files/base-check.yaml diff --git a/testing/kuttl/e2e-other/replica-service/01-node-port.yaml b/testing/kuttl/e2e-other/replica-service/01-node-port.yaml new file mode 100644 index 0000000000..c80e947e40 --- /dev/null +++ 
b/testing/kuttl/e2e-other/replica-service/01-node-port.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/np-cluster.yaml +assert: +- files/np-check.yaml diff --git a/testing/kuttl/e2e-other/replica-service/02-loadbalancer.yaml b/testing/kuttl/e2e-other/replica-service/02-loadbalancer.yaml new file mode 100644 index 0000000000..f1433111db --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/02-loadbalancer.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/lb-cluster.yaml +assert: +- files/lb-check.yaml diff --git a/testing/kuttl/e2e-other/replica-service/03-cluster-ip.yaml b/testing/kuttl/e2e-other/replica-service/03-cluster-ip.yaml new file mode 100644 index 0000000000..de6055ea6b --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/03-cluster-ip.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/cip-cluster.yaml +assert: +- files/cip-check.yaml diff --git a/testing/kuttl/e2e-other/replica-service/files/base-check.yaml b/testing/kuttl/e2e-other/replica-service/files/base-check.yaml new file mode 100644 index 0000000000..a83fce0f57 --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/base-check.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: service +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +apiVersion: v1 +kind: Service +metadata: + name: service-replicas diff --git a/testing/kuttl/e2e-other/replica-service/files/base-cluster.yaml b/testing/kuttl/e2e-other/replica-service/files/base-cluster.yaml new file mode 100644 index 0000000000..67c4481d2f --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/base-cluster.yaml @@ -0,0 +1,28 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: service +spec: + postgresVersion: ${KUTTL_PG_VERSION} + replicaService: + type: ClusterIP + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 0.5Gi + replicas: 2 + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 0.5Gi diff --git a/testing/kuttl/e2e-other/replica-service/files/cip-check.yaml b/testing/kuttl/e2e-other/replica-service/files/cip-check.yaml new file mode 100644 index 0000000000..5bf5422bb8 --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/cip-check.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: service-replicas +spec: + type: ClusterIP + selector: + postgres-operator.crunchydata.com/cluster: service + postgres-operator.crunchydata.com/role: replica diff --git a/testing/kuttl/e2e-other/replica-service/files/cip-cluster.yaml b/testing/kuttl/e2e-other/replica-service/files/cip-cluster.yaml new file mode 100644 index 0000000000..8545aa8223 --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/cip-cluster.yaml @@ -0,0 +1,8 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: service +spec: + replicaService: + type: ClusterIP + nodePort: null diff --git a/testing/kuttl/e2e-other/replica-service/files/lb-check.yaml b/testing/kuttl/e2e-other/replica-service/files/lb-check.yaml new file mode 100644 index 0000000000..b8519491c7 --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/lb-check.yaml @@ -0,0 +1,9 @@ 
+apiVersion: v1 +kind: Service +metadata: + name: service-replicas +spec: + type: LoadBalancer + selector: + postgres-operator.crunchydata.com/cluster: service + postgres-operator.crunchydata.com/role: replica diff --git a/testing/kuttl/e2e-other/replica-service/files/lb-cluster.yaml b/testing/kuttl/e2e-other/replica-service/files/lb-cluster.yaml new file mode 100644 index 0000000000..5e18f71dcd --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/lb-cluster.yaml @@ -0,0 +1,8 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: service +spec: + replicaService: + type: LoadBalancer + nodePort: null diff --git a/testing/kuttl/e2e-other/replica-service/files/np-check.yaml b/testing/kuttl/e2e-other/replica-service/files/np-check.yaml new file mode 100644 index 0000000000..c7d791e36a --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/np-check.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: service-replicas +spec: + type: NodePort + ports: + - name: postgres + port: 5432 + protocol: TCP + targetPort: postgres + selector: + postgres-operator.crunchydata.com/cluster: service + postgres-operator.crunchydata.com/role: replica diff --git a/testing/kuttl/e2e-other/replica-service/files/np-cluster.yaml b/testing/kuttl/e2e-other/replica-service/files/np-cluster.yaml new file mode 100644 index 0000000000..0b20ae63ad --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/np-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: service +spec: + replicaService: + type: NodePort diff --git a/testing/kuttl/e2e-other/resize-volume/00-assert.yaml b/testing/kuttl/e2e-other/resize-volume/00-assert.yaml new file mode 100644 index 0000000000..b4372b75e7 --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/00-assert.yaml @@ -0,0 +1,7 @@ +# Ensure that the default StorageClass supports VolumeExpansion +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" +allowVolumeExpansion: true diff --git a/testing/kuttl/e2e-other/resize-volume/01--cluster.yaml b/testing/kuttl/e2e-other/resize-volume/01--cluster.yaml new file mode 100644 index 0000000000..4737fb25f4 --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/01--cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: resize-volume-up +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e-other/resize-volume/01-assert.yaml b/testing/kuttl/e2e-other/resize-volume/01-assert.yaml new file mode 100644 index 0000000000..ea72af469c --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/01-assert.yaml @@ -0,0 +1,59 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: resize-volume-up +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-up + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 +--- 
+apiVersion: v1 +kind: Service +metadata: + name: resize-volume-up-primary +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-up + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 1Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-up + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +spec: + resources: + requests: + storage: 1Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound diff --git a/testing/kuttl/e2e-other/resize-volume/02--create-data.yaml b/testing/kuttl/e2e-other/resize-volume/02--create-data.yaml new file mode 100644 index 0000000000..c41a6f80c4 --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/02--create-data.yaml @@ -0,0 +1,31 @@ +--- +# Create some data that should be present after resizing. +apiVersion: batch/v1 +kind: Job +metadata: + name: create-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: resize-volume-up-pguser-resize-volume-up, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + command: + - psql + - $(PGURI) + - --set=ON_ERROR_STOP=1 + - --command + - | + CREATE TABLE important (data) AS VALUES ('treasure'); diff --git a/testing/kuttl/e2e-other/resize-volume/02-assert.yaml b/testing/kuttl/e2e-other/resize-volume/02-assert.yaml new file mode 100644 index 0000000000..fdb42e68f5 --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/02-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: create-data +status: + succeeded: 1 diff --git a/testing/kuttl/e2e-other/resize-volume/03--resize.yaml b/testing/kuttl/e2e-other/resize-volume/03--resize.yaml new file mode 100644 index 0000000000..dd7c96901f --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/03--resize.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: resize-volume-up +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 2Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 2Gi diff --git a/testing/kuttl/e2e-other/resize-volume/03-assert.yaml b/testing/kuttl/e2e-other/resize-volume/03-assert.yaml new file mode 100644 index 0000000000..11aa230cd4 --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/03-assert.yaml @@ -0,0 +1,37 @@ +# We know that the PVC sizes have change so now we can check that they have been +# updated to have the expected size +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-up + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 2Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim 
+metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-up + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +spec: + resources: + requests: + storage: 2Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound diff --git a/testing/kuttl/e2e-other/resize-volume/06--check-data.yaml b/testing/kuttl/e2e-other/resize-volume/06--check-data.yaml new file mode 100644 index 0000000000..682a46ef4d --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/06--check-data.yaml @@ -0,0 +1,40 @@ +--- +# Confirm that all the data still exists. +apiVersion: batch/v1 +kind: Job +metadata: + name: check-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: resize-volume-up-pguser-resize-volume-up, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Confirm that all the data still exists. + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + DECLARE + keep_data jsonb; + BEGIN + SELECT jsonb_agg(important) INTO keep_data FROM important; + ASSERT keep_data = '[{"data":"treasure"}]', format('got %L', keep_data); + END $$$$; diff --git a/testing/kuttl/e2e-other/resize-volume/06-assert.yaml b/testing/kuttl/e2e-other/resize-volume/06-assert.yaml new file mode 100644 index 0000000000..cf743b8701 --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/06-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: check-data +status: + succeeded: 1 diff --git a/testing/kuttl/e2e-other/resize-volume/11--cluster.yaml b/testing/kuttl/e2e-other/resize-volume/11--cluster.yaml new file mode 100644 index 0000000000..8d2d602ca6 --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/11--cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: resize-volume-down +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 2Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 2Gi diff --git a/testing/kuttl/e2e-other/resize-volume/11-assert.yaml b/testing/kuttl/e2e-other/resize-volume/11-assert.yaml new file mode 100644 index 0000000000..666b4a85c7 --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/11-assert.yaml @@ -0,0 +1,59 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: resize-volume-down +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-down + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: resize-volume-down-primary +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-down + 
postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 2Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-down + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +spec: + resources: + requests: + storage: 2Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound diff --git a/testing/kuttl/e2e-other/resize-volume/13--resize.yaml b/testing/kuttl/e2e-other/resize-volume/13--resize.yaml new file mode 100644 index 0000000000..77af2f2aa3 --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/13--resize.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: resize-volume-down +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e-other/resize-volume/13-assert.yaml b/testing/kuttl/e2e-other/resize-volume/13-assert.yaml new file mode 100644 index 0000000000..4210214fd6 --- /dev/null +++ b/testing/kuttl/e2e-other/resize-volume/13-assert.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Event +type: Warning +involvedObject: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: resize-volume-down +reason: PersistentVolumeError +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-down + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 2Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: resize-volume-down + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +spec: + resources: + requests: + storage: 2Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound diff --git a/testing/kuttl/e2e/cluster-pause/00--cluster.yaml b/testing/kuttl/e2e/cluster-pause/00--cluster.yaml new file mode 100644 index 0000000000..801a22d460 --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/00--cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-create-cluster.yaml +assert: +- files/00-cluster-created.yaml diff --git a/testing/kuttl/e2e/cluster-pause/00-assert.yaml b/testing/kuttl/e2e/cluster-pause/00-assert.yaml new file mode 100644 index 0000000000..a51dd3ab4a --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/00-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-pause +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-pause diff --git a/testing/kuttl/e2e/cluster-pause/01--cluster-paused.yaml b/testing/kuttl/e2e/cluster-pause/01--cluster-paused.yaml new file mode 100644 index 0000000000..deab5e0228 --- /dev/null +++ 
b/testing/kuttl/e2e/cluster-pause/01--cluster-paused.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/01-pause-cluster.yaml +assert: +- files/01-cluster-paused.yaml diff --git a/testing/kuttl/e2e/cluster-pause/01-assert.yaml b/testing/kuttl/e2e/cluster-pause/01-assert.yaml new file mode 100644 index 0000000000..a51dd3ab4a --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/01-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-pause +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-pause diff --git a/testing/kuttl/e2e/cluster-pause/02--cluster-resume.yaml b/testing/kuttl/e2e/cluster-pause/02--cluster-resume.yaml new file mode 100644 index 0000000000..bb1def96c5 --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/02--cluster-resume.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/02-resume-cluster.yaml +assert: +- files/02-cluster-resumed.yaml diff --git a/testing/kuttl/e2e/cluster-pause/02-assert.yaml b/testing/kuttl/e2e/cluster-pause/02-assert.yaml new file mode 100644 index 0000000000..a51dd3ab4a --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/02-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-pause +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-pause diff --git a/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml b/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml new file mode 100644 index 0000000000..a5fe982b1a --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml @@ -0,0 +1,10 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml b/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml new file mode 100644 index 0000000000..9f687a1dfa --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml @@ -0,0 +1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml b/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml new file mode 100644 index 0000000000..6776fc542b --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml @@ -0,0 +1,22 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +status: + conditions: + - message: No spec changes will be applied and no other statuses will be updated. 
+ reason: Paused + status: "False" + type: Progressing + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: cluster-pause-ha +spec: + type: ClusterIP diff --git a/testing/kuttl/e2e/cluster-pause/files/01-pause-cluster.yaml b/testing/kuttl/e2e/cluster-pause/files/01-pause-cluster.yaml new file mode 100644 index 0000000000..6a21b00b22 --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/files/01-pause-cluster.yaml @@ -0,0 +1,17 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +spec: + # We change the service, but this won't result in a change until we resume + service: + type: LoadBalancer + paused: true + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml b/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml new file mode 100644 index 0000000000..82062fb908 --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml @@ -0,0 +1,17 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: cluster-pause-ha +spec: + type: LoadBalancer diff --git a/testing/kuttl/e2e/cluster-pause/files/02-resume-cluster.yaml b/testing/kuttl/e2e/cluster-pause/files/02-resume-cluster.yaml new file mode 100644 index 0000000000..2f5665e146 --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/files/02-resume-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +spec: + paused: false diff --git a/testing/kuttl/e2e/cluster-start/00--cluster.yaml b/testing/kuttl/e2e/cluster-start/00--cluster.yaml new file mode 100644 index 0000000000..801a22d460 --- /dev/null +++ b/testing/kuttl/e2e/cluster-start/00--cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-create-cluster.yaml +assert: +- files/00-cluster-created.yaml diff --git a/testing/kuttl/e2e/cluster-start/00-assert.yaml b/testing/kuttl/e2e/cluster-start/00-assert.yaml new file mode 100644 index 0000000000..b513f5ffda --- /dev/null +++ b/testing/kuttl/e2e/cluster-start/00-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-start +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-start diff --git a/testing/kuttl/e2e/cluster-start/01--connect.yaml b/testing/kuttl/e2e/cluster-start/01--connect.yaml new file mode 100644 index 0000000000..9586a772ad --- /dev/null +++ b/testing/kuttl/e2e/cluster-start/01--connect.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/01-connect-psql.yaml +assert: +- files/01-psql-connected.yaml diff --git a/testing/kuttl/e2e/cluster-start/01-assert.yaml b/testing/kuttl/e2e/cluster-start/01-assert.yaml new file mode 100644 index 0000000000..b513f5ffda --- /dev/null +++ b/testing/kuttl/e2e/cluster-start/01-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods 
--selector postgres-operator.crunchydata.com/cluster=cluster-start +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-start diff --git a/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml b/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml new file mode 100644 index 0000000000..4eebece89e --- /dev/null +++ b/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-start +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: cluster-start-primary diff --git a/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml b/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml new file mode 100644 index 0000000000..713cd14eb3 --- /dev/null +++ b/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml @@ -0,0 +1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-start +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/cluster-start/files/01-connect-psql.yaml b/testing/kuttl/e2e/cluster-start/files/01-connect-psql.yaml new file mode 100644 index 0000000000..b4cef74941 --- /dev/null +++ b/testing/kuttl/e2e/cluster-start/files/01-connect-psql.yaml @@ -0,0 +1,29 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: cluster-start-pguser-cluster-start, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: cluster-start-pguser-cluster-start, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: cluster-start-pguser-cluster-start, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: cluster-start-pguser-cluster-start, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: cluster-start-pguser-cluster-start, key: password } } + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/cluster-start/files/01-psql-connected.yaml b/testing/kuttl/e2e/cluster-start/files/01-psql-connected.yaml new file mode 100644 index 0000000000..e4d8bbb37a --- /dev/null +++ b/testing/kuttl/e2e/cluster-start/files/01-psql-connected.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/delete-namespace/00-assert.yaml b/testing/kuttl/e2e/delete-namespace/00-assert.yaml new file mode 100644 index 0000000000..78aea811c3 --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/00-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n ${KUTTL_TEST_DELETE_NAMESPACE} describe pods --selector postgres-operator.crunchydata.com/cluster=delete-namespace +- namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + selector: postgres-operator.crunchydata.com/cluster=delete-namespace diff --git a/testing/kuttl/e2e/delete-namespace/00-create-cluster.yaml b/testing/kuttl/e2e/delete-namespace/00-create-cluster.yaml new file mode 100644 index 0000000000..2245df00c8 --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/00-create-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-create-namespace.yaml +- files/00-create-cluster.yaml +assert: +- files/00-created.yaml diff --git a/testing/kuttl/e2e/delete-namespace/01-assert.yaml b/testing/kuttl/e2e/delete-namespace/01-assert.yaml new file mode 100644 index 0000000000..78aea811c3 --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/01-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n ${KUTTL_TEST_DELETE_NAMESPACE} describe pods --selector postgres-operator.crunchydata.com/cluster=delete-namespace +- namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + selector: postgres-operator.crunchydata.com/cluster=delete-namespace diff --git a/testing/kuttl/e2e/delete-namespace/01-delete-namespace.yaml b/testing/kuttl/e2e/delete-namespace/01-delete-namespace.yaml new file mode 100644 index 0000000000..8fed721e5e --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/01-delete-namespace.yaml @@ -0,0 +1,10 @@ +--- +# Remove the namespace. +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: v1 + kind: Namespace + name: ${KUTTL_TEST_DELETE_NAMESPACE} +error: +- files/01-errors.yaml diff --git a/testing/kuttl/e2e/delete-namespace/README.md b/testing/kuttl/e2e/delete-namespace/README.md new file mode 100644 index 0000000000..697e2ae915 --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/README.md @@ -0,0 +1,11 @@ +### Delete namespace test + +* Create a namespace +* Start a regular cluster in that namespace +* Delete the namespace +* Check that nothing remains. + +Note: KUTTL provides a `$NAMESPACE` var that can be used in scripts/commands, +but which cannot be used in object definition yamls (like `01--cluster.yaml`). +Therefore, we use a given, non-random namespace that is defined in the makefile +and generated with `generate-kuttl`. 
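The note above is why every object definition in these tests uses `${KUTTL_...}` placeholders (for example `${KUTTL_PG_VERSION}` and `${KUTTL_TEST_DELETE_NAMESPACE}`). A minimal sketch of rendering one such file before KUTTL runs is shown below; it assumes an `envsubst`-style substitution, and the variable values are illustrative assumptions rather than the actual `generate-kuttl` Makefile target, which is not part of this diff.

```
# Sketch only: substitute the KUTTL_* variables referenced by one templated
# test file and print the rendered YAML. Values are assumptions for illustration.
export KUTTL_PG_VERSION=15
export KUTTL_TEST_DELETE_NAMESPACE=kuttl-test-delete-namespace
envsubst '$KUTTL_PG_VERSION $KUTTL_TEST_DELETE_NAMESPACE' \
  < testing/kuttl/e2e/delete-namespace/files/00-create-cluster.yaml
```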
diff --git a/testing/kuttl/e2e/delete-namespace/files/00-create-cluster.yaml b/testing/kuttl/e2e/delete-namespace/files/00-create-cluster.yaml new file mode 100644 index 0000000000..fe6392d75a --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/files/00-create-cluster.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/delete-namespace/files/00-create-namespace.yaml b/testing/kuttl/e2e/delete-namespace/files/00-create-namespace.yaml new file mode 100644 index 0000000000..617c1e5399 --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/files/00-create-namespace.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ${KUTTL_TEST_DELETE_NAMESPACE} diff --git a/testing/kuttl/e2e/delete-namespace/files/00-created.yaml b/testing/kuttl/e2e/delete-namespace/files/00-created.yaml new file mode 100644 index 0000000000..3d2c7ec936 --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/files/00-created.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/delete-namespace/files/01-errors.yaml b/testing/kuttl/e2e/delete-namespace/files/01-errors.yaml new file mode 100644 index 0000000000..ee6f31178c --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/files/01-errors.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + name: delete-namespace +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace +--- +# Patroni DCS objects are not owned by the PostgresCluster. 
+apiVersion: v1 +kind: Endpoints +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace +--- +apiVersion: v1 +kind: Pod +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace +--- +apiVersion: v1 +kind: Service +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace +--- +apiVersion: v1 +kind: Secret +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace +--- +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace diff --git a/testing/kuttl/e2e/delete/00-assert.yaml b/testing/kuttl/e2e/delete/00-assert.yaml new file mode 100644 index 0000000000..e4d88b3031 --- /dev/null +++ b/testing/kuttl/e2e/delete/00-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=delete +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=delete diff --git a/testing/kuttl/e2e/delete/00-create-cluster.yaml b/testing/kuttl/e2e/delete/00-create-cluster.yaml new file mode 100644 index 0000000000..801a22d460 --- /dev/null +++ b/testing/kuttl/e2e/delete/00-create-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-create-cluster.yaml +assert: +- files/00-cluster-created.yaml diff --git a/testing/kuttl/e2e/delete/01-delete-cluster.yaml b/testing/kuttl/e2e/delete/01-delete-cluster.yaml new file mode 100644 index 0000000000..a1f26b39c4 --- /dev/null +++ b/testing/kuttl/e2e/delete/01-delete-cluster.yaml @@ -0,0 +1,8 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: delete +error: +- files/01-cluster-deleted.yaml diff --git a/testing/kuttl/e2e/delete/10-assert.yaml b/testing/kuttl/e2e/delete/10-assert.yaml new file mode 100644 index 0000000000..a2c226cc7a --- /dev/null +++ b/testing/kuttl/e2e/delete/10-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=delete-with-replica +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=delete-with-replica diff --git a/testing/kuttl/e2e/delete/10-create-cluster-with-replicas.yaml b/testing/kuttl/e2e/delete/10-create-cluster-with-replicas.yaml new file mode 100644 index 0000000000..678a09c710 --- /dev/null +++ b/testing/kuttl/e2e/delete/10-create-cluster-with-replicas.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/10-create-cluster-with-replicas.yaml +assert: +- files/10-cluster-with-replicas-created.yaml diff --git a/testing/kuttl/e2e/delete/11-delete-cluster-with-replicas.yaml b/testing/kuttl/e2e/delete/11-delete-cluster-with-replicas.yaml new file mode 100644 index 0000000000..b2f04ea7ed --- /dev/null +++ b/testing/kuttl/e2e/delete/11-delete-cluster-with-replicas.yaml @@ -0,0 +1,10 @@ +--- +# Remove the cluster. 
+apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: delete-with-replica +error: +- files/11-cluster-with-replicas-deleted.yaml diff --git a/testing/kuttl/e2e/delete/20-assert.yaml b/testing/kuttl/e2e/delete/20-assert.yaml new file mode 100644 index 0000000000..d85d96101f --- /dev/null +++ b/testing/kuttl/e2e/delete/20-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=delete-not-running +# This shouldn't be running, so skip logs; if there's an error, we'll be able to see it in the describe diff --git a/testing/kuttl/e2e/delete/20-create-broken-cluster.yaml b/testing/kuttl/e2e/delete/20-create-broken-cluster.yaml new file mode 100644 index 0000000000..9db684036e --- /dev/null +++ b/testing/kuttl/e2e/delete/20-create-broken-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/20-create-broken-cluster.yaml +error: +- files/20-broken-cluster-not-created.yaml diff --git a/testing/kuttl/e2e/delete/21-delete-broken-cluster.yaml b/testing/kuttl/e2e/delete/21-delete-broken-cluster.yaml new file mode 100644 index 0000000000..3e159f17d4 --- /dev/null +++ b/testing/kuttl/e2e/delete/21-delete-broken-cluster.yaml @@ -0,0 +1,10 @@ +--- +# Remove the cluster. +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: delete-not-running +error: +- files/21-broken-cluster-deleted.yaml diff --git a/testing/kuttl/e2e/delete/README.md b/testing/kuttl/e2e/delete/README.md new file mode 100644 index 0000000000..7e99680162 --- /dev/null +++ b/testing/kuttl/e2e/delete/README.md @@ -0,0 +1,19 @@ +### Delete test + +#### Regular cluster delete (00-01) + +* Start a regular cluster +* Delete it +* Check that nothing remains. 
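+
+The "nothing remains" check is driven by the error file `files/01-cluster-deleted.yaml`;
+a rough manual equivalent (a sketch, assuming `kubectl` access to the test namespace in
+`$NAMESPACE`) is to confirm that no labeled resources are left behind:
+
+```bash
+# List anything still carrying the cluster label after deletion;
+# an empty result corresponds to the error-file assertion passing.
+kubectl get statefulsets,pods,services,endpoints,secrets,configmaps \
+    --namespace "$NAMESPACE" \
+    --selector postgres-operator.crunchydata.com/cluster=delete
+```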
+ +#### Delete cluster with replica (10-11) + +* Start a regular cluster with 2 replicas +* Delete it +* Check that nothing remains + +#### Delete a cluster that never started (20-21) + +* Start a cluster with a bad image +* Delete it +* Check that nothing remains diff --git a/testing/kuttl/e2e/delete/files/00-cluster-created.yaml b/testing/kuttl/e2e/delete/files/00-cluster-created.yaml new file mode 100644 index 0000000000..6130475c07 --- /dev/null +++ b/testing/kuttl/e2e/delete/files/00-cluster-created.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/delete/files/00-create-cluster.yaml b/testing/kuttl/e2e/delete/files/00-create-cluster.yaml new file mode 100644 index 0000000000..0dbcb08204 --- /dev/null +++ b/testing/kuttl/e2e/delete/files/00-create-cluster.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/delete/files/01-cluster-deleted.yaml b/testing/kuttl/e2e/delete/files/01-cluster-deleted.yaml new file mode 100644 index 0000000000..091bc96b7b --- /dev/null +++ b/testing/kuttl/e2e/delete/files/01-cluster-deleted.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete +--- +# Patroni DCS objects are not owned by the PostgresCluster. +apiVersion: v1 +kind: Endpoints +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete +--- +apiVersion: v1 +kind: Service +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete diff --git a/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml b/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml new file mode 100644 index 0000000000..1940fc680a --- /dev/null +++ b/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-with-replica +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +# Patroni labels and readiness happen separately. +# The next step expects to find pods by their role label; wait for them here. 
+apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica + postgres-operator.crunchydata.com/role: master +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica + postgres-operator.crunchydata.com/role: replica +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/delete/files/10-create-cluster-with-replicas.yaml b/testing/kuttl/e2e/delete/files/10-create-cluster-with-replicas.yaml new file mode 100644 index 0000000000..53c4fc434d --- /dev/null +++ b/testing/kuttl/e2e/delete/files/10-create-cluster-with-replicas.yaml @@ -0,0 +1,29 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-with-replica +spec: + postgresVersion: ${KUTTL_PG_VERSION} + patroni: + switchover: + enabled: true + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/delete/files/11-cluster-with-replicas-deleted.yaml b/testing/kuttl/e2e/delete/files/11-cluster-with-replicas-deleted.yaml new file mode 100644 index 0000000000..cc14b60d3d --- /dev/null +++ b/testing/kuttl/e2e/delete/files/11-cluster-with-replicas-deleted.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-with-replica +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica +--- +# Patroni DCS objects are not owned by the PostgresCluster. 
+apiVersion: v1 +kind: Endpoints +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica +--- +apiVersion: v1 +kind: Service +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica diff --git a/testing/kuttl/e2e/delete/files/20-broken-cluster-not-created.yaml b/testing/kuttl/e2e/delete/files/20-broken-cluster-not-created.yaml new file mode 100644 index 0000000000..f910fa9811 --- /dev/null +++ b/testing/kuttl/e2e/delete/files/20-broken-cluster-not-created.yaml @@ -0,0 +1,10 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-not-running +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/kuttl/e2e/delete/files/20-create-broken-cluster.yaml b/testing/kuttl/e2e/delete/files/20-create-broken-cluster.yaml new file mode 100644 index 0000000000..2b7d34f3f6 --- /dev/null +++ b/testing/kuttl/e2e/delete/files/20-create-broken-cluster.yaml @@ -0,0 +1,27 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-not-running +spec: + postgresVersion: ${KUTTL_PG_VERSION} + image: "example.com/does-not-exist" + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/delete/files/21-broken-cluster-deleted.yaml b/testing/kuttl/e2e/delete/files/21-broken-cluster-deleted.yaml new file mode 100644 index 0000000000..4527a3659d --- /dev/null +++ b/testing/kuttl/e2e/delete/files/21-broken-cluster-deleted.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-not-running +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-not-running +--- +# Patroni DCS objects are not owned by the PostgresCluster. 
+apiVersion: v1 +kind: Endpoints +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-not-running +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-not-running +--- +apiVersion: v1 +kind: Service +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-not-running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-not-running +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-not-running diff --git a/testing/kuttl/e2e/exporter-custom-queries/00--create-cluster.yaml b/testing/kuttl/e2e/exporter-custom-queries/00--create-cluster.yaml new file mode 100644 index 0000000000..975567b066 --- /dev/null +++ b/testing/kuttl/e2e/exporter-custom-queries/00--create-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/exporter-custom-queries-configmap.yaml +- files/exporter-custom-queries-cluster.yaml +assert: +- files/exporter-custom-queries-cluster-checks.yaml diff --git a/testing/kuttl/e2e/exporter-custom-queries/00-assert.yaml b/testing/kuttl/e2e/exporter-custom-queries/00-assert.yaml new file mode 100644 index 0000000000..bbf5c051fd --- /dev/null +++ b/testing/kuttl/e2e/exporter-custom-queries/00-assert.yaml @@ -0,0 +1,54 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# First, check that all containers in the instance pod are ready +# Then, list the query files mounted to the exporter and check for expected files +# Then, check the contents of the queries to ensure queries.yml was generated correctly +# Finally, store the current exporter pid as an annotation +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=exporter-custom-queries \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + condition_json=$(kubectl get "${pod}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } + + queries_files=$( + kubectl exec --namespace "${NAMESPACE}" "${pod}" -c exporter \ + -- ls /conf + ) + + { + contains "${queries_files}" "queries.yml" && + !(contains "${queries_files}" "defaultQueries.yml") + } || { + echo >&2 'The /conf directory should contain the queries.yml file. Instead it has:' + echo "${queries_files}" + exit 1 + } + + master_queries_contents=$( + kubectl exec --namespace "${NAMESPACE}" "${pod}" -c exporter \ + -- cat /tmp/queries.yml + ) + + { + contains "${master_queries_contents}" "# This is a test." && + !(contains "${master_queries_contents}" "ccp_postgresql_version") + } || { + echo >&2 'The master queries.yml file should only contain the contents of the custom queries.yml file. 
Instead it contains:' + echo "${master_queries_contents}" + exit 1 + } + + pid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + kubectl annotate --overwrite -n ${NAMESPACE} ${pod} oldpid=${pid} diff --git a/testing/kuttl/e2e/exporter-custom-queries/01--change-custom-queries.yaml b/testing/kuttl/e2e/exporter-custom-queries/01--change-custom-queries.yaml new file mode 100644 index 0000000000..7a28d431d1 --- /dev/null +++ b/testing/kuttl/e2e/exporter-custom-queries/01--change-custom-queries.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/exporter-custom-queries-configmap-update.yaml +assert: +- files/exporter-custom-queries-configmap-update-checks.yaml diff --git a/testing/kuttl/e2e/exporter-custom-queries/01-assert.yaml b/testing/kuttl/e2e/exporter-custom-queries/01-assert.yaml new file mode 100644 index 0000000000..db5a4757cb --- /dev/null +++ b/testing/kuttl/e2e/exporter-custom-queries/01-assert.yaml @@ -0,0 +1,33 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# First, check that all containers in the instance pod are ready +# Then, check that the exporter pid has changed +# Finally, check the contents of the queries to ensure queries.yml was generated correctly +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=exporter-custom-queries \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + oldPid=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.metadata.annotations.oldpid}") + newPid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + [ "${oldPid}" -eq "${newPid}" ] && retry "pid should have changed" && exit 1 + + master_queries_contents=$( + kubectl exec --namespace "${NAMESPACE}" "${pod}" -c exporter \ + -- cat /tmp/queries.yml + ) + + { + contains "${master_queries_contents}" "# This is a different test." && + !(contains "${master_queries_contents}" "ccp_postgresql_version") + } || { + echo >&2 'The master queries.yml file should only contain the contents of the custom queries.yml file. Instead it contains:' + echo "${master_queries_contents}" + exit 1 + } diff --git a/testing/kuttl/e2e/exporter-custom-queries/README.md b/testing/kuttl/e2e/exporter-custom-queries/README.md new file mode 100644 index 0000000000..801b6d02a8 --- /dev/null +++ b/testing/kuttl/e2e/exporter-custom-queries/README.md @@ -0,0 +1,3 @@ +# Exporter + +**Note**: This series of tests depends on PGO being deployed with the `AppendCustomQueries` feature gate OFF. There is a separate set of tests in `e2e-other` that tests the `AppendCustomQueries` functionality. 
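+
+To see what the exporter actually loaded while these tests run, the same commands used by
+the assertions can be run by hand (a sketch; the container name and file paths below match
+the assert scripts in this test):
+
+```bash
+# Find the exporter-enabled instance pod for this cluster.
+POD=$(kubectl get pods -o name -n "$NAMESPACE" \
+    -l postgres-operator.crunchydata.com/cluster=exporter-custom-queries \
+    -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true)
+
+# The query files mounted into the exporter, and the generated master queries file.
+kubectl exec -n "$NAMESPACE" "$POD" -c exporter -- ls /conf
+kubectl exec -n "$NAMESPACE" "$POD" -c exporter -- cat /tmp/queries.yml
+```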
diff --git a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster-checks.yaml b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster-checks.yaml new file mode 100644 index 0000000000..ed6fd22b7c --- /dev/null +++ b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster-checks.yaml @@ -0,0 +1,31 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-custom-queries +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-custom-queries + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-custom-queries-exporter-queries-config +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-queries-test +data: + queries.yml: "# This is a test." diff --git a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml new file mode 100644 index 0000000000..5356b83be9 --- /dev/null +++ b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-custom-queries +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: + configuration: + - configMap: + name: custom-queries-test diff --git a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update-checks.yaml b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update-checks.yaml new file mode 100644 index 0000000000..72af1103af --- /dev/null +++ b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update-checks.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-queries-test +data: + queries.yml: "# This is a different test." diff --git a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update.yaml b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update.yaml new file mode 100644 index 0000000000..72af1103af --- /dev/null +++ b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap-update.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-queries-test +data: + queries.yml: "# This is a different test." diff --git a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap.yaml b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap.yaml new file mode 100644 index 0000000000..9964d6bc1e --- /dev/null +++ b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-configmap.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-queries-test +data: + queries.yml: "# This is a test." 
diff --git a/testing/kuttl/e2e/exporter-no-tls/00--create-cluster.yaml b/testing/kuttl/e2e/exporter-no-tls/00--create-cluster.yaml new file mode 100644 index 0000000000..8209623cf8 --- /dev/null +++ b/testing/kuttl/e2e/exporter-no-tls/00--create-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/exporter-no-tls-cluster.yaml +assert: +- files/exporter-no-tls-cluster-checks.yaml diff --git a/testing/kuttl/e2e/exporter-no-tls/00-assert.yaml b/testing/kuttl/e2e/exporter-no-tls/00-assert.yaml new file mode 100644 index 0000000000..c6bbea051b --- /dev/null +++ b/testing/kuttl/e2e/exporter-no-tls/00-assert.yaml @@ -0,0 +1,47 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# First, check that all containers in the instance pod are ready +# Then, check the exporter logs for the 'TLS is disabled' line +# Then, grab the exporter metrics output and check that there were no scrape errors +# Finally, ensure the monitoring user exists and is configured +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=exporter-no-tls \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + condition_json=$(kubectl get "${pod}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } + + logs=$(kubectl logs "${pod}" --namespace "${NAMESPACE}" -c exporter) + { contains "${logs}" 'TLS is disabled'; } || { + echo 'tls is not disabled - it should be' + exit 1 + } + + scrape_metrics=$(kubectl exec "${pod}" -c exporter -n "${NAMESPACE}" -- \ + curl --silent http://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") + { contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; } || { + retry "${scrape_metrics}" + exit 1 + } + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + DECLARE + result record; + BEGIN + SELECT * INTO result FROM pg_catalog.pg_roles WHERE rolname = 'ccp_monitoring'; + ASSERT FOUND, 'user not found'; + END $$ + SQL diff --git a/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster-checks.yaml b/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster-checks.yaml new file mode 100644 index 0000000000..eab02c6888 --- /dev/null +++ b/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster-checks.yaml @@ -0,0 +1,24 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-no-tls +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-no-tls + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-no-tls-exporter-queries-config diff --git a/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml b/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml 
new file mode 100644 index 0000000000..690d5b505d --- /dev/null +++ b/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml @@ -0,0 +1,12 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-no-tls +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: {} diff --git a/testing/kuttl/e2e/exporter-password-change/00--create-cluster.yaml b/testing/kuttl/e2e/exporter-password-change/00--create-cluster.yaml new file mode 100644 index 0000000000..4c60626fa5 --- /dev/null +++ b/testing/kuttl/e2e/exporter-password-change/00--create-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/initial-postgrescluster.yaml +assert: +- files/initial-postgrescluster-checks.yaml diff --git a/testing/kuttl/e2e/exporter-password-change/00-assert.yaml b/testing/kuttl/e2e/exporter-password-change/00-assert.yaml new file mode 100644 index 0000000000..df2a331f10 --- /dev/null +++ b/testing/kuttl/e2e/exporter-password-change/00-assert.yaml @@ -0,0 +1,22 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# Check that all containers in the instance pod are ready +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + + pod=$(kubectl get pods -o name -n $NAMESPACE \ + -l postgres-operator.crunchydata.com/cluster=exporter-password-change \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + condition_json=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=exporter-password-change,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true diff --git a/testing/kuttl/e2e/exporter-password-change/01-assert.yaml b/testing/kuttl/e2e/exporter-password-change/01-assert.yaml new file mode 100644 index 0000000000..c3b25bd16c --- /dev/null +++ b/testing/kuttl/e2e/exporter-password-change/01-assert.yaml @@ -0,0 +1,27 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# Grab the exporter metrics output and check that there were no scrape errors +# Store the exporter pid as an annotation on the pod +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n $NAMESPACE \ + -l postgres-operator.crunchydata.com/cluster=exporter-password-change \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + scrape_metrics=$(kubectl exec ${pod} -c exporter -n ${NAMESPACE} -- \ + curl --silent http://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") + { contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; } || { + retry "${scrape_metrics}" + exit 1 + } + + pid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + kubectl annotate --overwrite -n 
${NAMESPACE} ${pod} oldpid=${pid} +collectors: +- type: pod + selector: "postgres-operator.crunchydata.com/cluster=exporter-password-change,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true" + container: exporter diff --git a/testing/kuttl/e2e/exporter-password-change/02--change-password.yaml b/testing/kuttl/e2e/exporter-password-change/02--change-password.yaml new file mode 100644 index 0000000000..e16e473f62 --- /dev/null +++ b/testing/kuttl/e2e/exporter-password-change/02--change-password.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/update-monitoring-password.yaml +assert: +- files/update-monitoring-password-checks.yaml diff --git a/testing/kuttl/e2e/exporter-password-change/02-assert.yaml b/testing/kuttl/e2e/exporter-password-change/02-assert.yaml new file mode 100644 index 0000000000..a06b350cdc --- /dev/null +++ b/testing/kuttl/e2e/exporter-password-change/02-assert.yaml @@ -0,0 +1,34 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# Ensure that the password has been updated in the exporter and it can still access +# Postgres. +# - Check that the exporter pid has changed meaning the current process should have the correct password +# - Check that the DATA_SOURCE_PASS_FILE contains the expected password (`password`) +# - Grab the scrape_error output from exporter metrics and check that there were no scrape errors +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@";} + + pod=$(kubectl get pods -o name -n $NAMESPACE \ + -l postgres-operator.crunchydata.com/cluster=exporter-password-change \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + oldPid=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.metadata.annotations.oldpid}") + newPid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) + [ "${oldPid}" -eq "${newPid}" ] && retry "pid should have changed" && exit 1 + + password=$(kubectl exec -n ${NAMESPACE} ${pod} -c exporter -- bash -c 'cat /opt/crunchy/password') + { contains "${password}" "password"; } || { + retry "unexpected password: ${password}" + exit 1 + } + + scrape_metrics=$(kubectl exec ${pod} -c exporter -n ${NAMESPACE} -- \ + curl --silent http://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") + { contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; } || { + retry "${scrape_metrics}" + exit 1 + } diff --git a/testing/kuttl/e2e/exporter-password-change/README.md b/testing/kuttl/e2e/exporter-password-change/README.md new file mode 100644 index 0000000000..2a5b596309 --- /dev/null +++ b/testing/kuttl/e2e/exporter-password-change/README.md @@ -0,0 +1,36 @@ +# Exporter Password Change + +## 00--create-cluster: +The TestStep will: + +1) Apply the `files/inital-postgrescluster.yaml` file to create a cluster with monitoring enabled +2) Assert that conditions outlined in `files/initial-postgrescluster-checks.yaml` are met + - PostgresCluster exists with a single ready replica + - A pod with `cluster` and `crunchy-postgres-exporter` labels has the status `{phase: Running}` + - A `-monitoring` secret exists with correct labels and ownerReferences + +## 00-assert: + +This TestAssert will loop through a script until: +1) the instance pod has the `ContainersReady` 
condition with status `true` +2) the asserts from `00--create-cluster` are met. + +## 01-assert: + +This TestAssert will loop through a script until: +1) The metrics endpoint returns `pg_exporter_last_scrape_error 0` meaning the exporter was able to access postgres metrics +2) It is able to store the pid of the running postgres_exporter process + +## 02-change-password: + +This TestStep will: +1) Apply the `files/update-monitoring-password.yaml` file to set the monitoring password to `password` +2) Assert that conditions outlined in `files/update-monitoring-password-checks.yaml` are met + - A `-monitoring` secret exists with `data.password` set to the encoded value for `password` + +## 02-assert: + +This TestAssert will loop through a script until: +1) An exec command can confirm that `/opt/crunchy/password` file contains the updated password +2) It can confirm that the pid of the postgres_exporter process has changed +3) The metrics endpoint returns `pg_exporter_last_scrape_error 0` meaning the exporter was able to access postgres metrics using the updated password diff --git a/testing/kuttl/e2e/exporter-password-change/files/check-restarted-pod.yaml b/testing/kuttl/e2e/exporter-password-change/files/check-restarted-pod.yaml new file mode 100644 index 0000000000..012dafa41c --- /dev/null +++ b/testing/kuttl/e2e/exporter-password-change/files/check-restarted-pod.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-password-change + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running diff --git a/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster-checks.yaml b/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster-checks.yaml new file mode 100644 index 0000000000..19887a0e10 --- /dev/null +++ b/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster-checks.yaml @@ -0,0 +1,33 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-password-change +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-password-change + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + name: exporter-password-change-monitoring + labels: + postgres-operator.crunchydata.com/cluster: exporter-password-change + postgres-operator.crunchydata.com/role: monitoring + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PostgresCluster + name: exporter-password-change diff --git a/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml b/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml new file mode 100644 index 0000000000..d16c898ac2 --- /dev/null +++ b/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml @@ -0,0 +1,12 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-password-change +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: {} diff --git 
a/testing/kuttl/e2e/exporter-password-change/files/update-monitoring-password-checks.yaml b/testing/kuttl/e2e/exporter-password-change/files/update-monitoring-password-checks.yaml new file mode 100644 index 0000000000..dcf1703861 --- /dev/null +++ b/testing/kuttl/e2e/exporter-password-change/files/update-monitoring-password-checks.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Secret +metadata: + name: exporter-password-change-monitoring + labels: + postgres-operator.crunchydata.com/cluster: exporter-password-change + postgres-operator.crunchydata.com/role: monitoring + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PostgresCluster + name: exporter-password-change +data: + # ensure the password is encoded to 'password' + password: cGFzc3dvcmQ= diff --git a/testing/kuttl/e2e/exporter-password-change/files/update-monitoring-password.yaml b/testing/kuttl/e2e/exporter-password-change/files/update-monitoring-password.yaml new file mode 100644 index 0000000000..7832c89f69 --- /dev/null +++ b/testing/kuttl/e2e/exporter-password-change/files/update-monitoring-password.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: exporter-password-change-monitoring + labels: + postgres-operator.crunchydata.com/cluster: exporter-password-change + postgres-operator.crunchydata.com/role: monitoring +stringData: + password: password +data: +# Ensure data field is deleted so that password/verifier will be regenerated diff --git a/testing/kuttl/e2e/exporter-tls/00--create-cluster.yaml b/testing/kuttl/e2e/exporter-tls/00--create-cluster.yaml new file mode 100644 index 0000000000..fbb92cbf0e --- /dev/null +++ b/testing/kuttl/e2e/exporter-tls/00--create-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/exporter-tls-certs.yaml +- files/exporter-tls-cluster.yaml +assert: +- files/exporter-tls-cluster-checks.yaml diff --git a/testing/kuttl/e2e/exporter-tls/00-assert.yaml b/testing/kuttl/e2e/exporter-tls/00-assert.yaml new file mode 100644 index 0000000000..9ea53266c9 --- /dev/null +++ b/testing/kuttl/e2e/exporter-tls/00-assert.yaml @@ -0,0 +1,48 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# First, check that all containers in the instance pod are ready +# Then, check the exporter logs for the 'TLS is enabled' line +# Then, grab the exporter metrics output and check that there were no scrape errors +# Finally, ensure the monitoring user exists and is configured +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=exporter-tls \ + -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + condition_json=$(kubectl get "${pod}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}") + [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 + { check_containers_ready "$condition_json"; } || { + retry "containers not ready" + exit 1 + } + + logs=$(kubectl logs "${pod}" --namespace "${NAMESPACE}" -c exporter) + { contains "${logs}" 'TLS is enabled'; } || { + echo >&2 'TLS is not enabled - it should be' + echo "${logs}" + exit 1 + } + + scrape_metrics=$(kubectl exec
"${pod}" -c exporter -n "${NAMESPACE}" -- \ + curl --insecure --silent https://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") + { contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; } || { + retry "${scrape_metrics}" + exit 1 + } + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + DECLARE + result record; + BEGIN + SELECT * INTO result FROM pg_catalog.pg_roles WHERE rolname = 'ccp_monitoring'; + ASSERT FOUND, 'user not found'; + END $$ + SQL diff --git a/testing/kuttl/e2e/exporter-tls/files/exporter-tls-certs.yaml b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-certs.yaml new file mode 100644 index 0000000000..1a1340a7b3 --- /dev/null +++ b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-certs.yaml @@ -0,0 +1,12 @@ +# Generated certs using openssl +# openssl req -x509 -nodes -newkey ec -pkeyopt ec_paramgen_curve:prime256v1 \ +# -pkeyopt ec_param_enc:named_curve -sha384 -keyout ca.key -out ca.crt \ +# -days 365 -subj "/CN=*" +apiVersion: v1 +kind: Secret +metadata: + name: cluster-cert +type: Opaque +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJiakNDQVJPZ0F3SUJBZ0lVUUU3T0pqRDM5WHUvelZlenZQYjdSQ0ZTcE1Jd0NnWUlLb1pJemowRUF3TXcKRERFS01BZ0dBMVVFQXd3QktqQWVGdzB5TWpFd01USXhPRE14TURoYUZ3MHlNekV3TVRJeE9ETXhNRGhhTUF3eApDakFJQmdOVkJBTU1BU293V1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkNBQVJjaUYyckNlbmg4UFFLClZGUWJaRVcvWi9XUGgwZkk1aHhVb1ZkVVpuRTBTNGhCK1U3aGV5L3QvQVJNbDF3cXovazQ0cmlBa1g1ckFMakgKei9hTm16bnJvMU13VVRBZEJnTlZIUTRFRmdRVTQvUFc2MEdUcWFQdGpYWXdsMk56d0RGMFRmY3dId1lEVlIwagpCQmd3Rm9BVTQvUFc2MEdUcWFQdGpYWXdsMk56d0RGMFRmY3dEd1lEVlIwVEFRSC9CQVV3QXdFQi96QUtCZ2dxCmhrak9QUVFEQXdOSkFEQkdBaUVBbG9iemo3Uml4NkU0OW8yS2JjOUdtYlRSbWE1SVdGb0k4Uk1zcGZDQzVOUUMKSVFET0hzLzhLNVkxeWhoWDc3SGIxSUpsdnFaVVNjdm5NTjBXeS9JUWRuemJ4QT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ1preDQ4cktidnZtUVRLSC8KSTN4STZzYW45Wk55MjQrOUQ4ODd5a2svb1l1aFJBTkNBQVJjaUYyckNlbmg4UFFLVkZRYlpFVy9aL1dQaDBmSQo1aHhVb1ZkVVpuRTBTNGhCK1U3aGV5L3QvQVJNbDF3cXovazQ0cmlBa1g1ckFMakh6L2FObXpucgotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster-checks.yaml b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster-checks.yaml new file mode 100644 index 0000000000..e192191fcd --- /dev/null +++ b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster-checks.yaml @@ -0,0 +1,29 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-tls +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: exporter-tls + postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" +status: + phase: Running +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-tls-exporter-queries-config +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: exporter-tls-exporter-web-config diff --git a/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml new file mode 100644 index 0000000000..4fa420664a --- /dev/null +++ b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml @@ -0,0 +1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: exporter-tls +spec: + 
postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: + customTLSSecret: + name: cluster-cert diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml new file mode 100644 index 0000000000..741efead41 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml @@ -0,0 +1,11 @@ +--- +# This upgrade is valid, but has no pgcluster to work on and should get that condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +spec: + # postgres version that is no longer available + fromPostgresVersion: 11 + toPostgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + postgresClusterName: major-upgrade-empty-image diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/01-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/01-assert.yaml new file mode 100644 index 0000000000..b7d0f936fb --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/01-assert.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotFound" diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml new file mode 100644 index 0000000000..f5ef8c029e --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml @@ -0,0 +1,23 @@ +--- +# Create the cluster we will do an actual upgrade on, but set the postgres version +# to '10' to force a missing image scenario +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + # postgres version that is no longer available + postgresVersion: 11 + patroni: + dynamicConfiguration: + postgresql: + parameters: + shared_preload_libraries: pgaudit, set_user, pg_stat_statements, pgnodemx, pg_cron + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/10-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/10-assert.yaml new file mode 100644 index 0000000000..72e9ff6387 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/10-assert.yaml @@ -0,0 +1,12 @@ +--- +# The cluster is not running due to the missing image, not due to a proper +# shutdown status. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotShutdown" diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml new file mode 100644 index 0000000000..316f3a5472 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Shutdown the cluster -- but without the annotation. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + shutdown: true diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/11-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/11-assert.yaml new file mode 100644 index 0000000000..5bd9d447cb --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/11-assert.yaml @@ -0,0 +1,11 @@ +--- +# Since the cluster is missing the annotation, we get this condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterPrimaryNotIdentified" diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml new file mode 100644 index 0000000000..fcdf4f62e3 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml @@ -0,0 +1,17 @@ +--- +# Update the postgres version and restart the cluster. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + shutdown: false + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +spec: + # update postgres version + fromPostgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/12-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/12-assert.yaml new file mode 100644 index 0000000000..14c33cccfe --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/12-assert.yaml @@ -0,0 +1,31 @@ +--- +# Wait for the instances to be ready and the replica backup to complete +# by waiting for the status to signal pods ready and pgbackrest stanza created +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} +status: + instances: + - name: '00' + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +# Even when the cluster exists, the pgupgrade is not progressing because the cluster is not shutdown +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotShutdown" diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml new file mode 100644 index 0000000000..316f3a5472 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Shutdown the cluster -- but without the annotation. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + shutdown: true diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/13-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/13-assert.yaml new file mode 100644 index 0000000000..78e51e566a --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/13-assert.yaml @@ -0,0 +1,11 @@ +--- +# Since the cluster is missing the annotation, we get this condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterMissingRequiredAnnotation" diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml new file mode 100644 index 0000000000..2fa2c949a9 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Annotate the cluster for an upgrade. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image + annotations: + postgres-operator.crunchydata.com/allow-upgrade: empty-image-upgrade diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/14-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/14-assert.yaml new file mode 100644 index 0000000000..bd828180f4 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/14-assert.yaml @@ -0,0 +1,22 @@ +--- +# Now that the postgres cluster is shut down and annotated, the pgupgrade +# can finish reconciling. We know the reconciliation is complete when +# the pgupgrade status is succeeded and the postgres cluster status +# has the updated version. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: empty-image-upgrade +status: + conditions: + - type: "Progressing" + status: "False" + - type: "Succeeded" + status: "True" +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/15--start-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/15--start-cluster.yaml new file mode 100644 index 0000000000..e5f270fb2f --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/15--start-cluster.yaml @@ -0,0 +1,10 @@ +--- +# Once the pgupgrade is finished, update the version and set shutdown to false +# in the postgres cluster +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + shutdown: false diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/15-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/15-assert.yaml new file mode 100644 index 0000000000..dfcbd4c819 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/15-assert.yaml @@ -0,0 +1,18 @@ +--- +# Wait for the instances to be ready with the target Postgres version. 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade-empty-image +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + instances: + - name: '00' + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/16-check-pgbackrest.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/16-check-pgbackrest.yaml new file mode 100644 index 0000000000..969e7f0ac3 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/16-check-pgbackrest.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +# Check that the pgbackrest setup has successfully completed +- script: | + kubectl -n "${NAMESPACE}" exec "statefulset.apps/major-upgrade-empty-image-repo-host" -c pgbackrest -- pgbackrest check --stanza=db diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/17--check-version.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/17--check-version.yaml new file mode 100644 index 0000000000..5315c1d14f --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/17--check-version.yaml @@ -0,0 +1,39 @@ +--- +# Check the version reported by PostgreSQL +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-empty-image-after + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 6 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: major-upgrade-empty-image-pguser-major-upgrade-empty-image, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_TO_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/17-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/17-assert.yaml new file mode 100644 index 0000000000..56289c35c1 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/17-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-empty-image-after +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/README.md b/testing/kuttl/e2e/major-upgrade-missing-image/README.md new file mode 100644 index 0000000000..1053da29ed --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade-missing-image/README.md @@ -0,0 +1,36 @@ +## Major upgrade missing image tests + +This is a variation derived from our major upgrade KUTTL tests designed to +test scenarios where required container images are not defined in either the +PostgresCluster spec or via the RELATED_IMAGES environment variables. + +### Basic PGUpgrade controller and CRD instance validation + +* 01--valid-upgrade: create a valid PGUpgrade instance +* 01-assert: check that the PGUpgrade instance exists and has the expected status + +### Verify new statuses for missing required container images + +* 10--cluster: create the cluster with an unavailable image (i.e. 
Postgres 11) +* 10-assert: check that the PGUpgrade instance has the expected reason: "PGClusterNotShutdown" +* 11-shutdown-cluster: set the spec.shutdown value to 'true' as required for upgrade +* 11-assert: check that the new reason is set, "PGClusterPrimaryNotIdentified" + +### Update to an available Postgres version, start and upgrade PostgresCluster + +* 12--start-and-update-version: update the Postgres version on both CRD instances and set 'shutdown' to false +* 12-assert: verify that the cluster is running and the PGUpgrade instance now has the new status info with reason: "PGClusterNotShutdown" +* 13--shutdown-cluster: set spec.shutdown to 'true' +* 13-assert: check that the PGUpgrade instance has the expected reason: "PGClusterMissingRequiredAnnotation" +* 14--annotate-cluster: set the required annotation +* 14-assert: verify that the upgrade succeeded and the new Postgres version shows in the cluster's status +* 15--start-cluster: set the new Postgres version and spec.shutdown to 'false' + +### Verify upgraded PostgresCluster + +* 15-assert: verify that the cluster is running +* 16-check-pgbackrest: check that the pgbackrest setup has successfully completed +* 17--check-version: check the version reported by PostgreSQL +* 17-assert: assert the Job from the previous step succeeded + + diff --git a/testing/kuttl/e2e/major-upgrade/01--invalid-pgupgrade.yaml b/testing/kuttl/e2e/major-upgrade/01--invalid-pgupgrade.yaml new file mode 100644 index 0000000000..ea90f5718a --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/01--invalid-pgupgrade.yaml @@ -0,0 +1,10 @@ +--- +# This pgupgrade is invalid and should get that condition (even with no cluster) +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +spec: + fromPostgresVersion: ${KUTTL_PG_VERSION} + toPostgresVersion: ${KUTTL_PG_VERSION} + postgresClusterName: major-upgrade diff --git a/testing/kuttl/e2e/major-upgrade/01-assert.yaml b/testing/kuttl/e2e/major-upgrade/01-assert.yaml new file mode 100644 index 0000000000..f4cef66aa7 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/01-assert.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGUpgradeInvalid" diff --git a/testing/kuttl/e2e/major-upgrade/02--valid-upgrade.yaml b/testing/kuttl/e2e/major-upgrade/02--valid-upgrade.yaml new file mode 100644 index 0000000000..f76ff06a9f --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/02--valid-upgrade.yaml @@ -0,0 +1,10 @@ +--- +# This upgrade is valid, but has no pgcluster to work on and should get that condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +spec: + fromPostgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} + toPostgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + postgresClusterName: major-upgrade diff --git a/testing/kuttl/e2e/major-upgrade/02-assert.yaml b/testing/kuttl/e2e/major-upgrade/02-assert.yaml new file mode 100644 index 0000000000..4df0ecc4d9 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/02-assert.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotFound" diff --git a/testing/kuttl/e2e/major-upgrade/10--already-updated-cluster.yaml 
b/testing/kuttl/e2e/major-upgrade/10--already-updated-cluster.yaml new file mode 100644 index 0000000000..0591645221 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/10--already-updated-cluster.yaml @@ -0,0 +1,16 @@ +--- +# Create a cluster that is already at the correct version +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/major-upgrade/10-assert.yaml b/testing/kuttl/e2e/major-upgrade/10-assert.yaml new file mode 100644 index 0000000000..202864ef09 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/10-assert.yaml @@ -0,0 +1,11 @@ +--- +# pgupgrade should exit since the cluster is already at the requested version +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGUpgradeResolved" diff --git a/testing/kuttl/e2e/major-upgrade/11-delete-cluster.yaml b/testing/kuttl/e2e/major-upgrade/11-delete-cluster.yaml new file mode 100644 index 0000000000..14eab0efbb --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/11-delete-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Delete the existing cluster. +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: major-upgrade diff --git a/testing/kuttl/e2e/major-upgrade/20--cluster-with-invalid-version.yaml b/testing/kuttl/e2e/major-upgrade/20--cluster-with-invalid-version.yaml new file mode 100644 index 0000000000..8d73277292 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/20--cluster-with-invalid-version.yaml @@ -0,0 +1,18 @@ +--- +# Create a cluster where the version does not match the pgupgrade's `from` +# TODO(benjaminjb): this isn't quite working out +# apiVersion: postgres-operator.crunchydata.com/v1beta1 +# kind: PostgresCluster +# metadata: +# name: major-upgrade +# spec: +# shutdown: true +# postgresVersion: ${KUTTL_PG_UPGRADE_TOO_EARLY_FROM_VERSION} +# instances: +# - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } +# backups: +# pgbackrest: +# repos: +# - name: repo1 +# volume: +# volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/major-upgrade/20-assert.yaml b/testing/kuttl/e2e/major-upgrade/20-assert.yaml new file mode 100644 index 0000000000..2ea1486284 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/20-assert.yaml @@ -0,0 +1,11 @@ +--- +# # pgupgrade should exit since the cluster is already at the requested version +# apiVersion: postgres-operator.crunchydata.com/v1beta1 +# kind: PGUpgrade +# metadata: +# name: major-upgrade-do-it +# status: +# conditions: +# - type: "Progressing" +# status: "False" +# reason: "PGUpgradeInvalidForCluster" diff --git a/testing/kuttl/e2e/major-upgrade/21-delete-cluster.yaml b/testing/kuttl/e2e/major-upgrade/21-delete-cluster.yaml new file mode 100644 index 0000000000..535c6311a4 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/21-delete-cluster.yaml @@ -0,0 +1,8 @@ +--- +# # Delete the existing cluster. 
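+# # (Kept commented out while the invalid-version scenario in 20--cluster.yaml is disabled; see the TODO there.)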
+# apiVersion: kuttl.dev/v1beta1 +# kind: TestStep +# delete: +# - apiVersion: postgres-operator.crunchydata.com/v1beta1 +# kind: PostgresCluster +# name: major-upgrade diff --git a/testing/kuttl/e2e/major-upgrade/30--cluster.yaml b/testing/kuttl/e2e/major-upgrade/30--cluster.yaml new file mode 100644 index 0000000000..01e1ef6175 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/30--cluster.yaml @@ -0,0 +1,22 @@ +--- +# Create the cluster we will do an actual upgrade on +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} + patroni: + dynamicConfiguration: + postgresql: + parameters: + shared_preload_libraries: pgaudit, set_user, pg_stat_statements, pgnodemx, pg_cron + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + replicas: 3 + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/major-upgrade/30-assert.yaml b/testing/kuttl/e2e/major-upgrade/30-assert.yaml new file mode 100644 index 0000000000..1db8ec257d --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/30-assert.yaml @@ -0,0 +1,31 @@ +--- +# Wait for the instances to be ready and the replica backup to complete +# by waiting for the status to signal pods ready and pgbackrest stanza created +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} +status: + instances: + - name: '00' + replicas: 3 + readyReplicas: 3 + updatedReplicas: 3 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +# Even when the cluster exists, the pgupgrade is not progressing because the cluster is not shutdown +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterNotShutdown" diff --git a/testing/kuttl/e2e/major-upgrade/31--create-data.yaml b/testing/kuttl/e2e/major-upgrade/31--create-data.yaml new file mode 100644 index 0000000000..ed8c27b06b --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/31--create-data.yaml @@ -0,0 +1,94 @@ +--- +# Check the version reported by PostgreSQL and create some data. +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-before + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_FROM_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; + - --command + - | + CREATE SCHEMA very; + CREATE TABLE very.important (data) AS VALUES ('treasure'); +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-before-replica + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # The Replica svc is not held in the user secret, so we hard-code the Service address + # (using the downstream API for the namespace) + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PGHOST + value: "major-upgrade-replicas.$(NAMESPACE).svc" + - name: PGPORT + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: password } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_FROM_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; diff --git a/testing/kuttl/e2e/major-upgrade/31-assert.yaml b/testing/kuttl/e2e/major-upgrade/31-assert.yaml new file mode 100644 index 0000000000..dab4dc9de0 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/31-assert.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-before +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-before-replica +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/major-upgrade/32--shutdown-cluster.yaml b/testing/kuttl/e2e/major-upgrade/32--shutdown-cluster.yaml new file mode 100644 index 0000000000..9e4a575a3a --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/32--shutdown-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Shutdown the cluster -- but without the annotation. 
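+# Until the allow-upgrade annotation is added in step 33, the PGUpgrade should report the PGClusterMissingRequiredAnnotation condition checked in 32-assert.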
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +spec: + shutdown: true diff --git a/testing/kuttl/e2e/major-upgrade/32-assert.yaml b/testing/kuttl/e2e/major-upgrade/32-assert.yaml new file mode 100644 index 0000000000..2ad7f2869a --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/32-assert.yaml @@ -0,0 +1,11 @@ +--- +# Since the cluster is missing the annotation, we get this condition +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + reason: "PGClusterMissingRequiredAnnotation" diff --git a/testing/kuttl/e2e/major-upgrade/33--annotate-cluster.yaml b/testing/kuttl/e2e/major-upgrade/33--annotate-cluster.yaml new file mode 100644 index 0000000000..35cd269035 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/33--annotate-cluster.yaml @@ -0,0 +1,8 @@ +--- +# Annotate the cluster for an upgrade. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade + annotations: + postgres-operator.crunchydata.com/allow-upgrade: major-upgrade-do-it diff --git a/testing/kuttl/e2e/major-upgrade/33-assert.yaml b/testing/kuttl/e2e/major-upgrade/33-assert.yaml new file mode 100644 index 0000000000..aadb5e3bb1 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/33-assert.yaml @@ -0,0 +1,22 @@ +--- +# Now that the postgres cluster is shut down and annotated, the pgupgrade +# can finish reconciling. We know the reconciling is complete when +# the pgupgrade status is succeeded and the postgres cluster status +# has the updated version. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGUpgrade +metadata: + name: major-upgrade-do-it +status: + conditions: + - type: "Progressing" + status: "False" + - type: "Succeeded" + status: "True" +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} diff --git a/testing/kuttl/e2e/major-upgrade/34--restart-cluster.yaml b/testing/kuttl/e2e/major-upgrade/34--restart-cluster.yaml new file mode 100644 index 0000000000..ee674151ca --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/34--restart-cluster.yaml @@ -0,0 +1,10 @@ +--- +# Once the pgupgrade is finished, update the version and set shutdown to false +# in the postgres cluster +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +spec: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + shutdown: false diff --git a/testing/kuttl/e2e/major-upgrade/34-assert.yaml b/testing/kuttl/e2e/major-upgrade/34-assert.yaml new file mode 100644 index 0000000000..aba583f74c --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/34-assert.yaml @@ -0,0 +1,18 @@ +--- +# Wait for the instances to be ready with the target Postgres version. 
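+# All three replicas should report ready and the pgBackRest stanza should be recreated for the upgraded cluster before the data checks in later steps run.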
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: major-upgrade +status: + postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} + instances: + - name: '00' + replicas: 3 + readyReplicas: 3 + updatedReplicas: 3 + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true diff --git a/testing/kuttl/e2e/major-upgrade/35-check-pgbackrest-and-replica.yaml b/testing/kuttl/e2e/major-upgrade/35-check-pgbackrest-and-replica.yaml new file mode 100644 index 0000000000..be1c3ff357 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/35-check-pgbackrest-and-replica.yaml @@ -0,0 +1,11 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +# Check that the pgbackrest setup has successfully completed +- script: | + kubectl -n "${NAMESPACE}" exec "statefulset.apps/major-upgrade-repo-host" -c pgbackrest -- pgbackrest check --stanza=db +# Check that the replica data dir has been successfully cleaned +- script: | + # Check that the old pg folders do not exist on the replica + REPLICA=$(kubectl get pod -l=postgres-operator.crunchydata.com/role=replica -n "${NAMESPACE}" -o=jsonpath='{ .items[0].metadata.name }') + kubectl -n "${NAMESPACE}" exec "${REPLICA}" -c database -- [ ! -d "pgdata/pg${KUTTL_PG_UPGRADE_FROM_VERSION}" ] diff --git a/testing/kuttl/e2e/major-upgrade/36--check-data-and-version.yaml b/testing/kuttl/e2e/major-upgrade/36--check-data-and-version.yaml new file mode 100644 index 0000000000..135f34c7df --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/36--check-data-and-version.yaml @@ -0,0 +1,108 @@ +--- +# Check the version reported by PostgreSQL and confirm that data was upgraded. +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-after + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 6 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_TO_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; + - --command + - | + DO $$$$ + DECLARE + everything jsonb; + BEGIN + SELECT jsonb_agg(important) INTO everything FROM very.important; + ASSERT everything = '[{"data":"treasure"}]', format('got %L', everything); + END $$$$; +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-after-replica + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # The Replica svc is not held in the user secret, so we hard-code the Service address + # (using the downstream API for the namespace) + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PGHOST + value: "major-upgrade-replicas.$(NAMESPACE).svc" + - name: PGPORT + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: major-upgrade-pguser-major-upgrade, key: password } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - --quiet + - --echo-errors + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_TO_VERSION}%', + format('got %L', current_setting('server_version_num')); + END $$$$; + - --command + - | + DO $$$$ + DECLARE + everything jsonb; + BEGIN + SELECT jsonb_agg(important) INTO everything FROM very.important; + ASSERT everything = '[{"data":"treasure"}]', format('got %L', everything); + END $$$$; diff --git a/testing/kuttl/e2e/major-upgrade/36-assert.yaml b/testing/kuttl/e2e/major-upgrade/36-assert.yaml new file mode 100644 index 0000000000..a545bfd756 --- /dev/null +++ b/testing/kuttl/e2e/major-upgrade/36-assert.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-after +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: major-upgrade-after-replica +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/optional-backups/00--cluster.yaml b/testing/kuttl/e2e/optional-backups/00--cluster.yaml new file mode 100644 index 0000000000..7b927831e0 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/00--cluster.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + diff --git a/testing/kuttl/e2e/optional-backups/00-assert.yaml b/testing/kuttl/e2e/optional-backups/00-assert.yaml new file mode 100644 index 0000000000..86392d0308 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/00-assert.yaml @@ -0,0 
+1,38 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +status: + instances: + - name: instance1 + pgbackrest: {} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: pgdata +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: master +status: + containerStatuses: + - ready: true + - ready: true diff --git a/testing/kuttl/e2e/optional-backups/01-errors.yaml b/testing/kuttl/e2e/optional-backups/01-errors.yaml new file mode 100644 index 0000000000..e702fcddb4 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/01-errors.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: created-without-backups-repo1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: created-without-backups-repo-host +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: created-without-backups-pgbackrest-config +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: created-without-backups-pgbackrest diff --git a/testing/kuttl/e2e/optional-backups/02-assert.yaml b/testing/kuttl/e2e/optional-backups/02-assert.yaml new file mode 100644 index 0000000000..eb3f70357f --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/02-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + BEGIN + ASSERT current_setting('archive_command') LIKE 'true', + format('expected "true", got %L', current_setting('archive_command')); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/03-assert.yaml b/testing/kuttl/e2e/optional-backups/03-assert.yaml new file mode 100644 index 0000000000..17ca1e4062 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/03-assert.yaml @@ -0,0 +1,14 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 \ + -c "CREATE TABLE important (data) AS VALUES ('treasure');" + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 \ + -c "CHECKPOINT;" diff --git a/testing/kuttl/e2e/optional-backups/04--cluster.yaml 
b/testing/kuttl/e2e/optional-backups/04--cluster.yaml new file mode 100644 index 0000000000..fc39ff6ebe --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/04--cluster.yaml @@ -0,0 +1,16 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + diff --git a/testing/kuttl/e2e/optional-backups/05-assert.yaml b/testing/kuttl/e2e/optional-backups/05-assert.yaml new file mode 100644 index 0000000000..d346e01a04 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/05-assert.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: replica +status: + containerStatuses: + - ready: true + - ready: true diff --git a/testing/kuttl/e2e/optional-backups/06-assert.yaml b/testing/kuttl/e2e/optional-backups/06-assert.yaml new file mode 100644 index 0000000000..c366545508 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/06-assert.yaml @@ -0,0 +1,18 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups \ + -l postgres-operator.crunchydata.com/role=replica) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + DECLARE + everything jsonb; + BEGIN + SELECT jsonb_agg(important) INTO everything FROM important; + ASSERT everything = '[{"data":"treasure"}]', format('got %L', everything); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/10--cluster.yaml b/testing/kuttl/e2e/optional-backups/10--cluster.yaml new file mode 100644 index 0000000000..6da85c93f9 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/10--cluster.yaml @@ -0,0 +1,27 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + diff --git a/testing/kuttl/e2e/optional-backups/10-assert.yaml b/testing/kuttl/e2e/optional-backups/10-assert.yaml new file mode 100644 index 0000000000..7b740b310d --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/10-assert.yaml @@ -0,0 +1,79 @@ +# It should be possible to turn backups back on. 
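+# After backups are added in 10--cluster.yaml, the repo host StatefulSet, its PVC, the pgBackRest RBAC objects, and a four-container instance Pod should all exist.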
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: pgdata +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: created-without-backups-repo1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: created-without-backups-repo-host +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: created-without-backups-pgbackrest-config +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/patroni: created-without-backups-ha + postgres-operator.crunchydata.com/role: master +status: + containerStatuses: + - ready: true + - ready: true + - ready: true + - ready: true diff --git a/testing/kuttl/e2e/optional-backups/11-assert.yaml b/testing/kuttl/e2e/optional-backups/11-assert.yaml new file mode 100644 index 0000000000..5976d03f41 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/11-assert.yaml @@ -0,0 +1,18 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backup \ + -l postgres-operator.crunchydata.com/instance-set=instance1 \ + -l postgres-operator.crunchydata.com/patroni=created-without-backups-ha \ + -l postgres-operator.crunchydata.com/role=master) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + BEGIN + ASSERT current_setting('archive_command') LIKE 'pgbackrest --stanza=db archive-push "%p"', + format('expected "pgbackrest --stanza=db archive-push \"%p\"", got %L', current_setting('archive_command')); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/20--cluster.yaml b/testing/kuttl/e2e/optional-backups/20--cluster.yaml new file mode 100644 index 0000000000..8e0d01cbf8 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/20--cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- command: |- + kubectl patch postgrescluster created-without-backups --type 'merge' -p '{"spec":{"backups": null}}' + namespaced: true diff --git a/testing/kuttl/e2e/optional-backups/20-assert.yaml b/testing/kuttl/e2e/optional-backups/20-assert.yaml new file mode 100644 index 0000000000..b469e277f8 --- 
/dev/null +++ b/testing/kuttl/e2e/optional-backups/20-assert.yaml @@ -0,0 +1,63 @@ +# It should be possible to turn backups back on. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: pgdata +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: created-without-backups-repo1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: created-without-backups-repo-host +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: created-without-backups-pgbackrest-config +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: created-without-backups-pgbackrest diff --git a/testing/kuttl/e2e/optional-backups/21-assert.yaml b/testing/kuttl/e2e/optional-backups/21-assert.yaml new file mode 100644 index 0000000000..5976d03f41 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/21-assert.yaml @@ -0,0 +1,18 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backup \ + -l postgres-operator.crunchydata.com/instance-set=instance1 \ + -l postgres-operator.crunchydata.com/patroni=created-without-backups-ha \ + -l postgres-operator.crunchydata.com/role=master) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + BEGIN + ASSERT current_setting('archive_command') LIKE 'pgbackrest --stanza=db archive-push "%p"', + format('expected "pgbackrest --stanza=db archive-push \"%p\"", got %L', current_setting('archive_command')); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/22--cluster.yaml b/testing/kuttl/e2e/optional-backups/22--cluster.yaml new file mode 100644 index 0000000000..2e25309886 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/22--cluster.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- command: kubectl annotate postgrescluster created-without-backups postgres-operator.crunchydata.com/authorizeBackupRemoval="true" + namespaced: true diff --git a/testing/kuttl/e2e/optional-backups/23-assert.yaml b/testing/kuttl/e2e/optional-backups/23-assert.yaml new file mode 100644 index 0000000000..8748ea015c --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/23-assert.yaml @@ -0,0 +1,26 @@ +# It should be possible to turn backups back on. 
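+# (Here backups have just been removed via the authorizeBackupRemoval annotation: the status reports no pgBackRest repos, and 24-errors confirms the backup objects are gone.)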
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +status: + instances: + - name: instance1 + pgbackrest: {} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: pgdata +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 diff --git a/testing/kuttl/e2e/optional-backups/24-errors.yaml b/testing/kuttl/e2e/optional-backups/24-errors.yaml new file mode 100644 index 0000000000..e702fcddb4 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/24-errors.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: created-without-backups-repo1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: created-without-backups-repo-host +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: created-without-backups-pgbackrest-config +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: created-without-backups-pgbackrest diff --git a/testing/kuttl/e2e/optional-backups/25-assert.yaml b/testing/kuttl/e2e/optional-backups/25-assert.yaml new file mode 100644 index 0000000000..eb3f70357f --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/25-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + BEGIN + ASSERT current_setting('archive_command') LIKE 'true', + format('expected "true", got %L', current_setting('archive_command')); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/README.md b/testing/kuttl/e2e/optional-backups/README.md new file mode 100644 index 0000000000..92c52d4136 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/README.md @@ -0,0 +1,13 @@ +## Optional backups + +### Steps + +00-02. Create cluster without backups, check that expected K8s objects do/don't exist, e.g., repo-host sts doesn't exist; check that the archive command is `true` + +03-06. Add data and a replica; check that the data successfully replicates to the replica. + +10-11. Update cluster to add backups, check that expected K8s objects do/don't exist, e.g., repo-host sts exists; check that the archive command is set to the usual + +20-21. Update cluster to remove backups but without annotation, check that no changes were made, including to the archive command + +22-25. 
Annotate cluster to remove existing backups, check that expected K8s objects do/don't exist, e.g., repo-host sts doesn't exist; check that the archive command is `true` diff --git a/testing/kuttl/e2e/password-change/00--cluster.yaml b/testing/kuttl/e2e/password-change/00--cluster.yaml new file mode 100644 index 0000000000..d7b7019b62 --- /dev/null +++ b/testing/kuttl/e2e/password-change/00--cluster.yaml @@ -0,0 +1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: password-change +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/password-change/00-assert.yaml b/testing/kuttl/e2e/password-change/00-assert.yaml new file mode 100644 index 0000000000..bfedc0b25e --- /dev/null +++ b/testing/kuttl/e2e/password-change/00-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: password-change +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: password-change-primary diff --git a/testing/kuttl/e2e/password-change/01--psql-connect-uri.yaml b/testing/kuttl/e2e/password-change/01--psql-connect-uri.yaml new file mode 100644 index 0000000000..2c9b769f89 --- /dev/null +++ b/testing/kuttl/e2e/password-change/01--psql-connect-uri.yaml @@ -0,0 +1,23 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - "$(PGURI)" + - -c + - "select version();" + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/password-change/01--psql-connect.yaml b/testing/kuttl/e2e/password-change/01--psql-connect.yaml new file mode 100644 index 0000000000..28ffa3a0e5 --- /dev/null +++ b/testing/kuttl/e2e/password-change/01--psql-connect.yaml @@ -0,0 +1,30 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: password } } + + # Do not wait indefinitely. 
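+          # (PGCONNECT_TIMEOUT caps each connection attempt at five seconds.)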
+ - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/password-change/01-assert.yaml b/testing/kuttl/e2e/password-change/01-assert.yaml new file mode 100644 index 0000000000..f9e5dca807 --- /dev/null +++ b/testing/kuttl/e2e/password-change/01-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/password-change/02--secret.yaml b/testing/kuttl/e2e/password-change/02--secret.yaml new file mode 100644 index 0000000000..03e4816e91 --- /dev/null +++ b/testing/kuttl/e2e/password-change/02--secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: password-change-pguser-password-change +data: + # Hardcoding the password as "datalake" + password: ZGF0YWxha2U= + verifier: "" diff --git a/testing/kuttl/e2e/password-change/02-errors.yaml b/testing/kuttl/e2e/password-change/02-errors.yaml new file mode 100644 index 0000000000..300ace7737 --- /dev/null +++ b/testing/kuttl/e2e/password-change/02-errors.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: password-change-pguser-password-change +data: + # `02-secret.yaml` changes the password and removes the verifier field, + # so when PGO reconciles the secret, it should fill in the empty verifier field; + # if it does not fill in the verifier field by a certain time this step will error + # and KUTTL will mark the test as failed. + verifier: "" diff --git a/testing/kuttl/e2e/password-change/03--psql-connect-uri.yaml b/testing/kuttl/e2e/password-change/03--psql-connect-uri.yaml new file mode 100644 index 0000000000..175482704a --- /dev/null +++ b/testing/kuttl/e2e/password-change/03--psql-connect-uri.yaml @@ -0,0 +1,26 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri2 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - "$(PGURI)" + - -c + - "select version();" + env: + # The ./02-errors.yaml checks that the secret is not in the state that we set it to + # in the ./02-secret.yaml file, i.e., the secret has been reconciled by PGO, + # so the uri field of the secret should be updated with the new password by this time + - name: PGURI + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: uri } } + + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/password-change/03--psql-connect.yaml b/testing/kuttl/e2e/password-change/03--psql-connect.yaml new file mode 100644 index 0000000000..fc03215183 --- /dev/null +++ b/testing/kuttl/e2e/password-change/03--psql-connect.yaml @@ -0,0 +1,34 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect2 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: user } } + # Hardcoding the password here to be equal to what we changed the password to in + # ./02-secret.yaml + # The ./02-errors.yaml checks that the secret is not in the state that we set it to + # in the ./02-secret.yaml file, i.e., the secret has been reconciled by PGO + - name: PGPASSWORD + value: datalake + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/password-change/03-assert.yaml b/testing/kuttl/e2e/password-change/03-assert.yaml new file mode 100644 index 0000000000..9db69d0367 --- /dev/null +++ b/testing/kuttl/e2e/password-change/03-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect2 +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri2 +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/password-change/04--secret.yaml b/testing/kuttl/e2e/password-change/04--secret.yaml new file mode 100644 index 0000000000..f5cd1537c9 --- /dev/null +++ b/testing/kuttl/e2e/password-change/04--secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: password-change-pguser-password-change +# Updating the password with the stringData field and an md5-based verifier +stringData: + password: infopond + verifier: "md585eb8fa4f697b2ea949d3aba788e8631" + uri: "" diff --git a/testing/kuttl/e2e/password-change/04-errors.yaml b/testing/kuttl/e2e/password-change/04-errors.yaml new file mode 100644 index 0000000000..f23cdded80 --- /dev/null +++ b/testing/kuttl/e2e/password-change/04-errors.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: password-change-pguser-password-change +data: + # `04-secret.yaml` changes the password and removes the verifier field, + # so when PGO reconciles the secret, it should fill in the empty verifier field; + # if it does not fill in the verifier field by a certain time this step will error + # and KUTTL will mark the test as failed. 
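+  # (In this step the cleared field is `uri`; the check waits for PGO to write the regenerated connection URI back into the secret.)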
+ uri: "" diff --git a/testing/kuttl/e2e/password-change/05--psql-connect-uri.yaml b/testing/kuttl/e2e/password-change/05--psql-connect-uri.yaml new file mode 100644 index 0000000000..8e96ccfde5 --- /dev/null +++ b/testing/kuttl/e2e/password-change/05--psql-connect-uri.yaml @@ -0,0 +1,26 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri3 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - "$(PGURI)" + - -c + - "select version();" + env: + # The ./04-errors.yaml checks that the secret is not in the state that we set it to + # in the ./04-secret.yaml file, i.e., the secret has been reconciled by PGO, + # so the uri field of the secret should be updated with the new password by this time + - name: PGURI + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/password-change/05--psql-connect.yaml b/testing/kuttl/e2e/password-change/05--psql-connect.yaml new file mode 100644 index 0000000000..7209235f31 --- /dev/null +++ b/testing/kuttl/e2e/password-change/05--psql-connect.yaml @@ -0,0 +1,34 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect3 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: password-change-pguser-password-change, key: user } } + # Hardcoding the password here to be equal to what we changed the password to in + # ./04-secret.yaml + # The ./04-errors.yaml checks that the secret is not in the state that we set it to + # in the ./04-secret.yaml file, i.e., the secret has been reconciled by PGO + - name: PGPASSWORD + value: infopond + + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/password-change/05-assert.yaml b/testing/kuttl/e2e/password-change/05-assert.yaml new file mode 100644 index 0000000000..07c2349b06 --- /dev/null +++ b/testing/kuttl/e2e/password-change/05-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect3 +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri3 +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/password-change/06--cluster.yaml b/testing/kuttl/e2e/password-change/06--cluster.yaml new file mode 100644 index 0000000000..4cb70defdd --- /dev/null +++ b/testing/kuttl/e2e/password-change/06--cluster.yaml @@ -0,0 +1,10 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: password-change +spec: + # Adding a custom user to the spec + users: + - name: rhino + databases: + - rhino diff --git a/testing/kuttl/e2e/password-change/06-assert.yaml b/testing/kuttl/e2e/password-change/06-assert.yaml new file mode 100644 index 0000000000..bfedc0b25e --- /dev/null +++ b/testing/kuttl/e2e/password-change/06-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: password-change +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: password-change-primary diff --git a/testing/kuttl/e2e/password-change/07--psql-connect-uri.yaml b/testing/kuttl/e2e/password-change/07--psql-connect-uri.yaml new file mode 100644 index 0000000000..2fb8057021 --- /dev/null +++ b/testing/kuttl/e2e/password-change/07--psql-connect-uri.yaml @@ -0,0 +1,23 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri4 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - "$(PGURI)" + - -c + - "select version();" + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/password-change/07--psql-connect.yaml b/testing/kuttl/e2e/password-change/07--psql-connect.yaml new file mode 100644 index 0000000000..277cce24c4 --- /dev/null +++ b/testing/kuttl/e2e/password-change/07--psql-connect.yaml @@ -0,0 +1,30 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect4 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: password } } + + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/password-change/07-assert.yaml b/testing/kuttl/e2e/password-change/07-assert.yaml new file mode 100644 index 0000000000..4f6afd5d98 --- /dev/null +++ b/testing/kuttl/e2e/password-change/07-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect4 +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri4 +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/password-change/08--secret.yaml b/testing/kuttl/e2e/password-change/08--secret.yaml new file mode 100644 index 0000000000..b104ce7ae7 --- /dev/null +++ b/testing/kuttl/e2e/password-change/08--secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: password-change-pguser-rhino +data: + # Hardcoding the password as "datalake" + password: ZGF0YWxha2U= + verifier: "" diff --git a/testing/kuttl/e2e/password-change/08-errors.yaml b/testing/kuttl/e2e/password-change/08-errors.yaml new file mode 100644 index 0000000000..a7ab60c9eb --- /dev/null +++ b/testing/kuttl/e2e/password-change/08-errors.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: password-change-pguser-rhino +data: + # `08-secret.yaml` changes the password and removes the verifier field, + # so when PGO reconciles the secret, it should fill in the empty verifier field; + # if it does not fill in the verifier field by a certain time this step will error + # and KUTTL will mark the test as failed. + verifier: "" diff --git a/testing/kuttl/e2e/password-change/09--psql-connect-uri.yaml b/testing/kuttl/e2e/password-change/09--psql-connect-uri.yaml new file mode 100644 index 0000000000..5d83af7933 --- /dev/null +++ b/testing/kuttl/e2e/password-change/09--psql-connect-uri.yaml @@ -0,0 +1,26 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri5 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - "$(PGURI)" + - -c + - "select version();" + env: + # The ./08-errors.yaml checks that the secret is not in the state that we set it to + # in the ./08-secret.yaml file, i.e., the secret has been reconciled by PGO, + # so the uri field of the secret should be updated with the new password by this time + - name: PGURI + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: uri } } + + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/password-change/09--psql-connect.yaml b/testing/kuttl/e2e/password-change/09--psql-connect.yaml new file mode 100644 index 0000000000..912fb33561 --- /dev/null +++ b/testing/kuttl/e2e/password-change/09--psql-connect.yaml @@ -0,0 +1,34 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect5 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: user } } + # Hardcoding the password here to be equal to what we changed the password to in + # ./08-secret.yaml + # The ./08-errors.yaml checks that the secret is not in the state that we set it to + # in the ./08-secret.yaml file, i.e., the secret has been reconciled by PGO + - name: PGPASSWORD + value: datalake + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/password-change/09-assert.yaml b/testing/kuttl/e2e/password-change/09-assert.yaml new file mode 100644 index 0000000000..399b7cb17d --- /dev/null +++ b/testing/kuttl/e2e/password-change/09-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect5 +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri5 +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/password-change/10--secret.yaml b/testing/kuttl/e2e/password-change/10--secret.yaml new file mode 100644 index 0000000000..7002cc622e --- /dev/null +++ b/testing/kuttl/e2e/password-change/10--secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: password-change-pguser-rhino +# Updating the password with the stringData field and a scram verifier +stringData: + password: infopond + verifier: "SCRAM-SHA-256$4096:RI03PMRQH2oAFMH6AOQHdA==$D74VOn98ErW3J8CIiFYldUVO+kjsXj+Ju7jhmMURHQo=:c5hC/1V2TYNnoJ6VcaSJCcoGQ2eTcYJBP/pfKFv+k54=" + uri: "" diff --git a/testing/kuttl/e2e/password-change/10-errors.yaml b/testing/kuttl/e2e/password-change/10-errors.yaml new file mode 100644 index 0000000000..16d7b1642a --- /dev/null +++ b/testing/kuttl/e2e/password-change/10-errors.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: password-change-pguser-rhino +data: + # `10-secret.yaml` changes the password and removes the verifier field, + # so when PGO reconciles the secret, it should fill in the empty verifier field; + # if it does not fill in the verifier field by a certain time this step will error + # and KUTTL will mark the test as failed. 
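+  # (As in step 04, the cleared field here is `uri`; PGO should regenerate it with the new password for the rhino user.)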
+ uri: "" diff --git a/testing/kuttl/e2e/password-change/11--psql-connect-uri.yaml b/testing/kuttl/e2e/password-change/11--psql-connect-uri.yaml new file mode 100644 index 0000000000..f7f6d8287a --- /dev/null +++ b/testing/kuttl/e2e/password-change/11--psql-connect-uri.yaml @@ -0,0 +1,26 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri6 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - "$(PGURI)" + - -c + - "select version();" + env: + # The ./10-errors.yaml checks that the secret is not in the state that we set it to + # in the ./10-secret.yaml file, i.e., the secret has been reconciled by PGO, + # so the uri field of the secret should be updated with the new password by this time + - name: PGURI + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/password-change/11--psql-connect.yaml b/testing/kuttl/e2e/password-change/11--psql-connect.yaml new file mode 100644 index 0000000000..420de82024 --- /dev/null +++ b/testing/kuttl/e2e/password-change/11--psql-connect.yaml @@ -0,0 +1,34 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect6 +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: password-change-pguser-rhino, key: user } } + # Hardcoding the password here to be equal to what we changed the password to in + # ./10-secret.yaml + # The ./10-errors.yaml checks that the secret is not in the state that we set it to + # in the ./10-secret.yaml file, i.e., the secret has been reconciled by PGO + - name: PGPASSWORD + value: infopond + + # Do not wait indefinitely. 
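The verifier hardcoded in step 10's secret has to correspond to the plaintext password used above ("infopond"), otherwise these connection checks would fail. If you ever need to produce such a verifier yourself, one approach (a sketch, not part of the test: the throwaway role name "scratch" is made up here, and the primary is located the same way the other steps do, assuming the cluster behind these secrets is named password-change) is to let PostgreSQL hash the password and read the result back from pg_authid:

    PRIMARY=$(kubectl get pod --namespace "${NAMESPACE}" --output name \
      --selector 'postgres-operator.crunchydata.com/cluster=password-change,postgres-operator.crunchydata.com/role=master')

    kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -qAt \
      --command "SET password_encryption = 'scram-sha-256';" \
      --command "CREATE ROLE scratch LOGIN PASSWORD 'infopond';" \
      --command "SELECT rolpassword FROM pg_authid WHERE rolname = 'scratch';" \
      --command "DROP ROLE scratch;"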
+ - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/password-change/11-assert.yaml b/testing/kuttl/e2e/password-change/11-assert.yaml new file mode 100644 index 0000000000..589c2cbf21 --- /dev/null +++ b/testing/kuttl/e2e/password-change/11-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect6 +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect-uri6 +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/password-change/README.md b/testing/kuttl/e2e/password-change/README.md new file mode 100644 index 0000000000..e898bd5ac2 --- /dev/null +++ b/testing/kuttl/e2e/password-change/README.md @@ -0,0 +1,27 @@ +### Password Change Test with Kuttl + +This Kuttl routine runs through the following steps: + +#### Create cluster and test connection + +- 00: Creates the cluster and verifies that it exists and is ready for connection +- 01: Connects to the cluster with the PGO-generated password (both with env vars and with the URI) + +#### Default user connection tests + +- 02: Change the password (using Kuttl's update object method on the secret's `data` field) and verify that the password changes by asserting that the `verifier` field is not blank (using KUTTL's `errors` method, which makes sure that a state is _not_ met by a certain time) +- 03: Connects to the cluster with the user-defined password (both with env vars and with the URI) +- 04: Change the password and verifier (using Kuttl's update object method on the secret's `stringData` field) and verify that the password changes by asserting that the `uri` field is not blank (using KUTTL's `errors` method, which makes sure that a state is _not_ met by a certain time) +- 05: Connects to the cluster with the second user-defined password (both with env vars and with the URI) + +#### Create custom user and test connection + +- 06: Updates the postgrescluster spec with a custom user and password +- 07: Connects to the cluster with the PGO-generated password (both with env vars and with the URI) for the custom user + +#### Custom user connection tests + +- 08: Change the custom user's password (using Kuttl's update object method on the secret's `data` field) and verify that the password changes by asserting that the `verifier` field is not blank (using KUTTL's `errors` method, which makes sure that a state is _not_ met by a certain time) +- 09: Connects to the cluster with the user-defined password (both with env vars and with the URI) for the custom user +- 10: Change the custom user's password and verifier (using Kuttl's update object method on the secret's `stringData` field) and verify that the password changes by asserting that the `uri` field is not blank (using KUTTL's `errors` method, which makes sure that a state is _not_ met by a certain time) +- 11: Connects to the cluster with the second user-defined password (both with env vars and with the URI) for the custom user diff --git a/testing/kuttl/e2e/pgadmin/01--cluster.yaml b/testing/kuttl/e2e/pgadmin/01--cluster.yaml new file mode 100644 index 0000000000..d1afb7be04 --- /dev/null +++ b/testing/kuttl/e2e/pgadmin/01--cluster.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm +data: + configMap: config +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-secret +type: Opaque +stringData: + password: myPassword +--- +# Create a cluster with a configured pgAdmin UI. 
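Beyond the automated file checks in step 02, the settings wired in by the manifest below can also be eyeballed in the running pgAdmin UI itself. A sketch, assuming pgAdmin's usual listen port of 5050 (the StatefulSet name comes from the 01-assert that follows):

    kubectl port-forward --namespace "${NAMESPACE}" statefulset/interfaced-pgadmin 5050:5050
    # then open http://localhost:5050 and confirm the custom LOGIN_BANNER on the login page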
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: interfaced + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + userInterface: + pgAdmin: + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + config: + files: + - secret: + name: test-secret + - configMap: + name: test-cm + settings: + SHOW_GRAVATAR_IMAGE: False + LOGIN_BANNER: | + Custom KUTTL Login Banner diff --git a/testing/kuttl/e2e/pgadmin/01-assert.yaml b/testing/kuttl/e2e/pgadmin/01-assert.yaml new file mode 100644 index 0000000000..e4192a1217 --- /dev/null +++ b/testing/kuttl/e2e/pgadmin/01-assert.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: interfaced +status: + instances: + - name: instance1 + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 + +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: interfaced-pgadmin +status: + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 + +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-secret +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm diff --git a/testing/kuttl/e2e/pgadmin/02--check-settings.yaml b/testing/kuttl/e2e/pgadmin/02--check-settings.yaml new file mode 100644 index 0000000000..c68d032d1e --- /dev/null +++ b/testing/kuttl/e2e/pgadmin/02--check-settings.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + # Log the amount of space on the startup volume. Assert that 4KiB are used. + - script: | + kubectl exec --namespace "${NAMESPACE}" statefulset.apps/interfaced-pgadmin \ + -- df --block-size=1K /etc/pgadmin | + awk '{ print } END { exit ($3 != "4") }' + + # Assert that current settings contain values from the spec. + - script: | + SETTINGS=$( + kubectl exec --namespace "${NAMESPACE}" statefulset.apps/interfaced-pgadmin \ + -- cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin.json + ) + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + { + contains "${SETTINGS}" '"LOGIN_BANNER": "Custom KUTTL Login Banner\n"' && + contains "${SETTINGS}" '"SHOW_GRAVATAR_IMAGE": false' + } || { + echo >&2 'Wrong settings!' + echo "${SETTINGS}" + exit 1 + } + + - script: | + CONTENTS=$( + kubectl exec --namespace "${NAMESPACE}" statefulset.apps/interfaced-pgadmin \ + -- cat /etc/pgadmin/conf.d/configMap + ) + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + { + contains "${CONTENTS}" 'config' + } || { + echo >&2 'Wrong settings!' + echo "${CONTENTS}" + exit 1 + } + + - script: | + CONTENTS=$( + kubectl exec --namespace "${NAMESPACE}" statefulset.apps/interfaced-pgadmin \ + -- cat /etc/pgadmin/conf.d/password + ) + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + { + contains "${CONTENTS}" 'myPassword' + } || { + echo >&2 'Wrong settings!' 
+ echo "${CONTENTS}" + exit 1 + } diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml new file mode 100644 index 0000000000..9665fac665 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml @@ -0,0 +1,28 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgbackrest-backup-standby +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + global: + backup-standby: "y" + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml new file mode 100644 index 0000000000..d69a3c68b5 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml @@ -0,0 +1,23 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgbackrest-backup-standby +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: + - bound: true + name: repo1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: pgbackrest-backup-standby + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +status: + phase: Failed diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/01--check-backup-logs.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/01--check-backup-logs.yaml new file mode 100644 index 0000000000..72d2050d4a --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/01--check-backup-logs.yaml @@ -0,0 +1,20 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +# First, find at least one backup job pod. +# Then, check the logs for the 'unable to find standby cluster' line. +# If this line isn't found, exit 1. 
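Before grepping the logs as the script below does, it can help to confirm that the replica-create backup attempts really are failing; the same labels used by the 00-assert work for a quick look (a sketch):

    kubectl get pods --namespace "${NAMESPACE}" \
      --selector 'postgres-operator.crunchydata.com/cluster=pgbackrest-backup-standby,postgres-operator.crunchydata.com/pgbackrest-backup=replica-create'
    # expect a failed pod here while backup-standby=y has no standby to back up from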
+- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=pgbackrest-backup-standby \ + -l postgres-operator.crunchydata.com/pgbackrest-backup=replica-create) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + logs=$(kubectl logs "${pod}" --namespace "${NAMESPACE}") + { contains "${logs}" 'unable to find standby cluster - cannot proceed'; } || { + echo 'did not find expected standby cluster error ' + exit 1 + } diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml new file mode 100644 index 0000000000..c986f4a9de --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml @@ -0,0 +1,28 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgbackrest-backup-standby +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + global: + backup-standby: "y" + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml new file mode 100644 index 0000000000..92f7b12f5a --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgbackrest-backup-standby +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: pgbackrest-backup-standby + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/README.md b/testing/kuttl/e2e/pgbackrest-backup-standby/README.md new file mode 100644 index 0000000000..39fb8707a8 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/README.md @@ -0,0 +1,5 @@ +### pgBackRest backup-standby test + +* 00: Create a cluster with 'backup-standby' set to 'y' but with only one replica. +* 01: Check the backup Job Pod logs for the expected error. +* 02: Update the cluster to have 2 replicas and verify that the cluster can initialize successfully and the backup job can complete. 
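Step 02 in the list above re-applies the cluster with replicas: 2, after which the replica-create backup should succeed. The condition the 02-assert waits on can also be checked directly from the cluster status (a sketch):

    kubectl get postgrescluster/pgbackrest-backup-standby --namespace "${NAMESPACE}" \
      --output jsonpath='{.status.pgbackrest.repos[0].replicaCreateBackupComplete}{"\n"}'
    # prints "true" once the initial backup has completed against the new standby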
diff --git a/testing/kuttl/e2e/pgbackrest-init/00--cluster.yaml b/testing/kuttl/e2e/pgbackrest-init/00--cluster.yaml new file mode 100644 index 0000000000..03391359a1 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/00--cluster.yaml @@ -0,0 +1,38 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: init-pgbackrest +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + manual: + repoName: repo2 + options: + - --type=full + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + # Adding a second PVC repo for testing, rather than test with S3/GCS/Azure + - name: repo2 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/pgbackrest-init/00-assert.yaml b/testing/kuttl/e2e/pgbackrest-init/00-assert.yaml new file mode 100644 index 0000000000..5181c95993 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/00-assert.yaml @@ -0,0 +1,68 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: init-pgbackrest +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: +# Assert that the status has the two repos, with only the first having the `replicaCreateBackupComplete` field + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true + - bound: true + name: repo2 + stanzaCreated: true +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: init-pgbackrest + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +status: + succeeded: 1 +--- +# Assert the existence of two PVCs +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: init-pgbackrest + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest: "" + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 + postgres-operator.crunchydata.com/pgbackrest-volume: "" + name: init-pgbackrest-repo1 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +status: + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: init-pgbackrest + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest: "" + postgres-operator.crunchydata.com/pgbackrest-repo: repo2 + postgres-operator.crunchydata.com/pgbackrest-volume: "" + name: init-pgbackrest-repo2 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +status: + phase: Bound diff --git a/testing/kuttl/e2e/pgbackrest-init/01-pgbackrest-connect.yaml b/testing/kuttl/e2e/pgbackrest-init/01-pgbackrest-connect.yaml new file mode 100644 index 0000000000..94fa317da1 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/01-pgbackrest-connect.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +# When the cluster comes up, only the repo in the 0th position has activated with a backup, +# so the pgbackrest status should be "mixed" and there should be only one backup +- script: CLUSTER=init-pgbackrest ../../scripts/pgbackrest-initialization.sh "mixed" 1 
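The pgbackrest-initialization.sh script referenced above lives outside this diff, so its exact checks are not shown here; presumably it inspects pgbackrest info for the stanza status and counts the backups. A rough manual equivalent (a sketch: the <cluster>-repo-host StatefulSet name, the pgbackrest container name, and the db stanza are assumed PGO conventions, not taken from this change):

    kubectl exec --namespace "${NAMESPACE}" statefulset.apps/init-pgbackrest-repo-host \
      --container pgbackrest -- pgbackrest info --stanza=db
    # before the manual backup the stanza status reads "mixed" (repo2 has no backup yet);
    # after step 02 it reads "ok" and a second full backup appears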
diff --git a/testing/kuttl/e2e/pgbackrest-init/02--cluster.yaml b/testing/kuttl/e2e/pgbackrest-init/02--cluster.yaml new file mode 100644 index 0000000000..606272257d --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/02--cluster.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- command: kubectl annotate postgrescluster init-pgbackrest postgres-operator.crunchydata.com/pgbackrest-backup="manual" + namespaced: true diff --git a/testing/kuttl/e2e/pgbackrest-init/02-assert.yaml b/testing/kuttl/e2e/pgbackrest-init/02-assert.yaml new file mode 100644 index 0000000000..589a04e738 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/02-assert.yaml @@ -0,0 +1,10 @@ +# Manual backup job should have pushed to repo2 +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: init-pgbackrest + postgres-operator.crunchydata.com/pgbackrest-backup: manual + postgres-operator.crunchydata.com/pgbackrest-repo: repo2 +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbackrest-init/03-pgbackrest-connect.yaml b/testing/kuttl/e2e/pgbackrest-init/03-pgbackrest-connect.yaml new file mode 100644 index 0000000000..9c5cbc9154 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/03-pgbackrest-connect.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +# Now that a manual backup has been pushed to repo2, the pgbackrest status should be "ok" +# and there should be two backups +- script: CLUSTER=init-pgbackrest ../../scripts/pgbackrest-initialization.sh "ok" 2 diff --git a/testing/kuttl/e2e/pgbackrest-init/04--cluster.yaml b/testing/kuttl/e2e/pgbackrest-init/04--cluster.yaml new file mode 100644 index 0000000000..e732f1fd9a --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/04--cluster.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: init-pgbackrest +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + manual: + repoName: repo2 + options: + - --type=full + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + # Adding a second PVC repo for testing, rather than test with S3/GCS/Azure + - name: repo2 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/pgbackrest-init/04-assert.yaml b/testing/kuttl/e2e/pgbackrest-init/04-assert.yaml new file mode 100644 index 0000000000..04a38ac9f4 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/04-assert.yaml @@ -0,0 +1,34 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: init-pgbackrest +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: +# Assert that the status has the two repos, with only the first having the `replicaCreateBackupComplete` field + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true + - bound: true + name: repo2 + stanzaCreated: true +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: init-pgbackrest + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create + 
postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbackrest-init/05-pgbackrest-connect.yaml b/testing/kuttl/e2e/pgbackrest-init/05-pgbackrest-connect.yaml new file mode 100644 index 0000000000..d8b9cd6758 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/05-pgbackrest-connect.yaml @@ -0,0 +1,25 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- script: | + # Assumes the cluster only has a single replica + NEW_REPLICA=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=init-pgbackrest, + postgres-operator.crunchydata.com/role=replica' + ) + + LIST=$( + kubectl exec --namespace "${NAMESPACE}" "${NEW_REPLICA}" -- \ + ls /pgdata/pg${KUTTL_PG_VERSION}/ + ) + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + { + !(contains "${LIST}" 'recovery.signal') + } || { + echo >&2 'Signal file(s) found' + echo "${LIST}" + exit 1 + } diff --git a/testing/kuttl/e2e/pgbackrest-init/06--check-spool-path.yaml b/testing/kuttl/e2e/pgbackrest-init/06--check-spool-path.yaml new file mode 100644 index 0000000000..e32cc2fc87 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/06--check-spool-path.yaml @@ -0,0 +1,17 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/role=master' + ) + + LIST=$( + kubectl exec --namespace "${NAMESPACE}" -c database "${PRIMARY}" -- \ + ls -l /pgdata + ) + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + contains "$LIST" "pgbackrest-spool" || exit 1 diff --git a/testing/kuttl/e2e/pgbackrest-init/README.md b/testing/kuttl/e2e/pgbackrest-init/README.md new file mode 100644 index 0000000000..d319a31b09 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/README.md @@ -0,0 +1,6 @@ +### pgBackRest Init test + +* 00: Create a cluster with two PVC repos and set up for manual backups to go to the second; verify that the PVCs exist and that the backup job completed successfully +* 01: Run pgbackrest-initialization.sh, which checks that the status matches the expected status of `mixed` (because the second repo in the repo list has not yet been pushed to) and that there is only one full backup +* 02: Use `kubectl` to annotate the cluster to initiate a manual backup; verify that the job completed successfully +* 03: Rerun pgbackrest-initialization.sh, now expecting the status to be `ok` since both repos have been pushed to and there to be two full backups diff --git a/testing/kuttl/e2e/pgbackrest-restore/01--create-cluster.yaml b/testing/kuttl/e2e/pgbackrest-restore/01--create-cluster.yaml new file mode 100644 index 0000000000..c414806892 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/01--create-cluster.yaml @@ -0,0 +1,26 @@ +--- +# Create a cluster with a single pgBackRest repository and some parameters that +# require attention during PostgreSQL recovery. 
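A note on why max_connections gets called out as needing attention: PostgreSQL tracks it through WAL, and an instance replaying that WAL with hot_standby enabled must run with a value at least as large as the server that generated it, which is why these restore tests set it explicitly and raise it again in step 07. Confirming the live value on the cluster created below is straightforward (a sketch):

    PRIMARY=$(kubectl get pod --namespace "${NAMESPACE}" --output name \
      --selector 'postgres-operator.crunchydata.com/cluster=original,postgres-operator.crunchydata.com/role=master')

    kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -qAt --command 'SHOW max_connections;'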
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: original + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + patroni: + dynamicConfiguration: + postgresql: + parameters: + max_connections: 200 + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + replicas: 2 + backups: + pgbackrest: + manual: + repoName: repo1 + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/pgbackrest-restore/01-assert.yaml b/testing/kuttl/e2e/pgbackrest-restore/01-assert.yaml new file mode 100644 index 0000000000..25b5bbee76 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/01-assert.yaml @@ -0,0 +1,12 @@ +--- +# Wait for the replica backup to complete. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: original +status: + pgbackrest: + repos: + - name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true diff --git a/testing/kuttl/e2e/pgbackrest-restore/02--create-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/02--create-data.yaml new file mode 100644 index 0000000000..6801edbf61 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/02--create-data.yaml @@ -0,0 +1,32 @@ +--- +# Create some data that will be restored. +apiVersion: batch/v1 +kind: Job +metadata: + name: original-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: original-pguser-original, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + command: + - psql + - $(PGURI) + - --set=ON_ERROR_STOP=1 + - --command + - | + CREATE SCHEMA "original"; + CREATE TABLE important (data) AS VALUES ('treasure'); diff --git a/testing/kuttl/e2e/pgbackrest-restore/02-assert.yaml b/testing/kuttl/e2e/pgbackrest-restore/02-assert.yaml new file mode 100644 index 0000000000..5115ba97c9 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/02-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: original-data +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbackrest-restore/03--backup.yaml b/testing/kuttl/e2e/pgbackrest-restore/03--backup.yaml new file mode 100644 index 0000000000..b759dd0fc4 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/03--backup.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + # Annotate the cluster to trigger a backup. + - script: | + kubectl annotate --namespace="${NAMESPACE}" postgrescluster/original \ + 'postgres-operator.crunchydata.com/pgbackrest-backup=one' diff --git a/testing/kuttl/e2e/pgbackrest-restore/03-assert.yaml b/testing/kuttl/e2e/pgbackrest-restore/03-assert.yaml new file mode 100644 index 0000000000..a2c5b3bb22 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/03-assert.yaml @@ -0,0 +1,13 @@ +--- +# Wait for the backup job to complete. 
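The assert that follows identifies the manual backup Job purely by its labels, which also makes them handy for watching the backup interactively while the step runs (a sketch):

    kubectl get jobs --namespace "${NAMESPACE}" --watch \
      --selector 'postgres-operator.crunchydata.com/cluster=original,postgres-operator.crunchydata.com/pgbackrest-backup=manual'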
+apiVersion: batch/v1 +kind: Job +metadata: + annotations: + postgres-operator.crunchydata.com/pgbackrest-backup: one + labels: + postgres-operator.crunchydata.com/cluster: original + postgres-operator.crunchydata.com/pgbackrest-backup: manual + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbackrest-restore/04--clone-cluster.yaml b/testing/kuttl/e2e/pgbackrest-restore/04--clone-cluster.yaml new file mode 100644 index 0000000000..4bc1ce56a9 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/04--clone-cluster.yaml @@ -0,0 +1,22 @@ +--- +# Clone the cluster using a pgBackRest restore. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: clone-one + labels: { postgres-operator-test: kuttl } +spec: + dataSource: + postgresCluster: + clusterName: original + repoName: repo1 + + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/pgbackrest-restore/04-assert.yaml b/testing/kuttl/e2e/pgbackrest-restore/04-assert.yaml new file mode 100644 index 0000000000..8aa51fc440 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/04-assert.yaml @@ -0,0 +1,12 @@ +--- +# Wait for the clone cluster to come online. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: clone-one +status: + instances: + - name: '00' + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 diff --git a/testing/kuttl/e2e/pgbackrest-restore/05--check-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/05--check-data.yaml new file mode 100644 index 0000000000..1ee6fe9c32 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/05--check-data.yaml @@ -0,0 +1,49 @@ +--- +# Confirm that all the data was restored. +apiVersion: batch/v1 +kind: Job +metadata: + name: clone-one-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # Connect to the cluster using the restored database and original credentials. + - name: PGHOST + valueFrom: { secretKeyRef: { name: clone-one-pguser-clone-one, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: clone-one-pguser-clone-one, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: original-pguser-original, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: original-pguser-original, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: original-pguser-original, key: password } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Confirm that all the data was restored. + # Note: the `$$$$` is reduced to `$$` by Kubernetes. 
+ # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - -qa + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + DECLARE + restored jsonb; + BEGIN + SELECT jsonb_agg(important) INTO restored FROM important; + ASSERT restored = '[{"data":"treasure"}]', format('got %L', restored); + END $$$$; diff --git a/testing/kuttl/e2e/pgbackrest-restore/05-assert.yaml b/testing/kuttl/e2e/pgbackrest-restore/05-assert.yaml new file mode 100644 index 0000000000..1b6fad318b --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/05-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: clone-one-data +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbackrest-restore/06--delete-clone.yaml b/testing/kuttl/e2e/pgbackrest-restore/06--delete-clone.yaml new file mode 100644 index 0000000000..69ebc06c9d --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/06--delete-clone.yaml @@ -0,0 +1,8 @@ +--- +# Remove the cloned cluster. +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: clone-one diff --git a/testing/kuttl/e2e/pgbackrest-restore/07--annotate.yaml b/testing/kuttl/e2e/pgbackrest-restore/07--annotate.yaml new file mode 100644 index 0000000000..279c216ed0 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/07--annotate.yaml @@ -0,0 +1,18 @@ +--- +# Annotate the cluster with the timestamp at which PostgreSQL last started. +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/role=master' + ) + START=$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qAt --command 'SELECT pg_postmaster_start_time()' + ) + kubectl annotate --namespace "${NAMESPACE}" postgrescluster/original \ + "testing/start-before=${START}" diff --git a/testing/kuttl/e2e/pgbackrest-restore/07--update-cluster.yaml b/testing/kuttl/e2e/pgbackrest-restore/07--update-cluster.yaml new file mode 100644 index 0000000000..f83a02c7c6 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/07--update-cluster.yaml @@ -0,0 +1,25 @@ +--- +# Update the cluster with PostgreSQL parameters that require attention during recovery. 
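Raising max_connections in the spec below requires a PostgreSQL restart, which is exactly what step 08 waits for by watching pg_postmaster_start_time(). Until that restart happens, the pending change is visible in pg_settings (a sketch):

    PRIMARY=$(kubectl get pod --namespace "${NAMESPACE}" --output name \
      --selector 'postgres-operator.crunchydata.com/cluster=original,postgres-operator.crunchydata.com/role=master')

    # pending_restart stays true until PostgreSQL restarts with the new value
    kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -qAt \
      --command "SELECT setting, pending_restart FROM pg_settings WHERE name = 'max_connections';"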
+apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: original + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + patroni: + dynamicConfiguration: + postgresql: + parameters: + max_connections: 1000 + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + replicas: 2 + backups: + pgbackrest: + manual: + repoName: repo1 + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/pgbackrest-restore/08--wait-restart.yaml b/testing/kuttl/e2e/pgbackrest-restore/08--wait-restart.yaml new file mode 100644 index 0000000000..305d757386 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/08--wait-restart.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + BEFORE=$( + kubectl get --namespace "${NAMESPACE}" postgrescluster/original \ + --output 'go-template={{ index .metadata.annotations "testing/start-before" }}' + ) + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/role=master' + ) + + # Wait for PostgreSQL to restart. + while true; do + START=$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qAt --command 'SELECT pg_postmaster_start_time()' + ) + if [ "${START}" ] && [ "${START}" != "${BEFORE}" ]; then break; else sleep 1; fi + done + echo "${START} != ${BEFORE}" + + # Reset counters in the "pg_stat_archiver" view. + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qb --command "SELECT pg_stat_reset_shared('archiver')" diff --git a/testing/kuttl/e2e/pgbackrest-restore/09--add-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/09--add-data.yaml new file mode 100644 index 0000000000..41c2255239 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/09--add-data.yaml @@ -0,0 +1,31 @@ +--- +# Add more data to the WAL archive. +apiVersion: batch/v1 +kind: Job +metadata: + name: original-more-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: original-pguser-original, key: uri } } + + # Do not wait indefinitely. 
+ - { name: PGCONNECT_TIMEOUT, value: '5' } + + command: + - psql + - $(PGURI) + - --set=ON_ERROR_STOP=1 + - --command + - | + INSERT INTO important (data) VALUES ('water'), ('socks'); diff --git a/testing/kuttl/e2e/pgbackrest-restore/09-assert.yaml b/testing/kuttl/e2e/pgbackrest-restore/09-assert.yaml new file mode 100644 index 0000000000..a60cd9ab8f --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/09-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: original-more-data +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbackrest-restore/10--wait-archived.yaml b/testing/kuttl/e2e/pgbackrest-restore/10--wait-archived.yaml new file mode 100644 index 0000000000..446886ead3 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/10--wait-archived.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/role=master' + ) + + # Wait for the data to be sent to the WAL archive. A prior step reset the + # "pg_stat_archiver" counters, so anything more than zero should suffice. + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -c 'SELECT pg_switch_wal()' + while [ 0 = "$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -qAt -c 'SELECT archived_count FROM pg_stat_archiver' + )" ]; do sleep 1; done diff --git a/testing/kuttl/e2e/pgbackrest-restore/11--clone-cluster.yaml b/testing/kuttl/e2e/pgbackrest-restore/11--clone-cluster.yaml new file mode 100644 index 0000000000..fcbdde4ea7 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/11--clone-cluster.yaml @@ -0,0 +1,22 @@ +--- +# Clone the cluster using a pgBackRest restore. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: clone-two + labels: { postgres-operator-test: kuttl } +spec: + dataSource: + postgresCluster: + clusterName: original + repoName: repo1 + + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/pgbackrest-restore/11-assert.yaml b/testing/kuttl/e2e/pgbackrest-restore/11-assert.yaml new file mode 100644 index 0000000000..0ad9669a62 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/11-assert.yaml @@ -0,0 +1,12 @@ +--- +# Wait for the clone cluster to come online. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: clone-two +status: + instances: + - name: '00' + replicas: 1 + readyReplicas: 1 + updatedReplicas: 1 diff --git a/testing/kuttl/e2e/pgbackrest-restore/12--check-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/12--check-data.yaml new file mode 100644 index 0000000000..2cd2e4932b --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/12--check-data.yaml @@ -0,0 +1,51 @@ +--- +# Confirm that all the data was restored. 
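The wait loops in steps 10 and 14 only poll archived_count (after step 08 resets the counters); when archiving looks stuck, the rest of pg_stat_archiver usually explains why (a sketch):

    PRIMARY=$(kubectl get pod --namespace "${NAMESPACE}" --output name \
      --selector 'postgres-operator.crunchydata.com/cluster=original,postgres-operator.crunchydata.com/role=master')

    kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -qAt --command \
      'SELECT archived_count, last_archived_wal, last_archived_time, failed_count, last_failed_wal FROM pg_stat_archiver;'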
+apiVersion: batch/v1 +kind: Job +metadata: + name: clone-two-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # Connect to the cluster using the restored database and original credentials. + - name: PGHOST + valueFrom: { secretKeyRef: { name: clone-two-pguser-clone-two, key: host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: clone-two-pguser-clone-two, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: original-pguser-original, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: original-pguser-original, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: original-pguser-original, key: password } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Confirm that all the data was restored. + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - -qa + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + DECLARE + restored jsonb; + BEGIN + SELECT jsonb_agg(important) INTO restored FROM important; + ASSERT restored = '[ + {"data":"treasure"}, {"data":"water"}, {"data":"socks"} + ]', format('got %L', restored); + END $$$$; diff --git a/testing/kuttl/e2e/pgbackrest-restore/12-assert.yaml b/testing/kuttl/e2e/pgbackrest-restore/12-assert.yaml new file mode 100644 index 0000000000..198d196836 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/12-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: clone-two-data +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbackrest-restore/13--delete-clone.yaml b/testing/kuttl/e2e/pgbackrest-restore/13--delete-clone.yaml new file mode 100644 index 0000000000..9646f66f35 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/13--delete-clone.yaml @@ -0,0 +1,8 @@ +--- +# Remove the cloned cluster. +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: clone-two diff --git a/testing/kuttl/e2e/pgbackrest-restore/14--lose-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/14--lose-data.yaml new file mode 100644 index 0000000000..4f1eaeaa53 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/14--lose-data.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/role=master' + ) + OBJECTIVE=$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qAt --command 'SELECT clock_timestamp()' + ) + + # Store the recovery objective for later steps. + kubectl annotate --namespace "${NAMESPACE}" postgrescluster/original \ + "testing/objective=${OBJECTIVE}" + + # A reason to restore. Wait for the change to be sent to the WAL archive. 
+ kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qb original --set ON_ERROR_STOP=1 \ + --command 'DROP TABLE original.important' \ + --command "SELECT pg_stat_reset_shared('archiver')" \ + --command 'SELECT pg_switch_wal()' + + while [ 0 = "$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -qAt -c 'SELECT archived_count FROM pg_stat_archiver' + )" ]; do sleep 1; done + + # The replica should also need to be restored. + - script: | + REPLICA=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/role=replica' + ) + + kubectl exec --stdin --namespace "${NAMESPACE}" "${REPLICA}" \ + -- psql -qb original --set ON_ERROR_STOP=1 \ + --file=- <<'SQL' + DO $$ + BEGIN + ASSERT to_regclass('important') IS NULL, 'expected no table'; + PERFORM * FROM information_schema.tables WHERE table_name = 'important'; + ASSERT NOT FOUND, 'expected no table'; + END $$ + SQL diff --git a/testing/kuttl/e2e/pgbackrest-restore/15--in-place-pitr.yaml b/testing/kuttl/e2e/pgbackrest-restore/15--in-place-pitr.yaml new file mode 100644 index 0000000000..3e647946db --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/15--in-place-pitr.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + TARGET_JSON=$( + kubectl get --namespace "${NAMESPACE}" postgrescluster/original \ + --output 'go-template={{ index .metadata.annotations "testing/objective" | printf "--target=%q" | printf "%q" }}' + ) + + # Configure the cluster for an in-place point-in-time restore (PITR). + kubectl patch --namespace "${NAMESPACE}" postgrescluster/original \ + --type 'merge' --patch ' + {"spec":{"backups":{"pgbackrest":{"restore":{ + "enabled": true, + "repoName": "repo1", + "options": ["--type=time", '"${TARGET_JSON}"'] + }}}}}' + + # Annotate the cluster to trigger the restore. 
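The in-place restore is a two-part trigger: the patch above enables and configures spec.backups.pgbackrest.restore (the repo, --type=time, and the --target captured earlier), and the annotation that follows tells PGO to actually run it. Repeating a restore later only takes a new annotation value, along the lines of this sketch (the value "two" is arbitrary):

    kubectl annotate --overwrite --namespace "${NAMESPACE}" postgrescluster/original \
      'postgres-operator.crunchydata.com/pgbackrest-restore=two'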
+ kubectl annotate --namespace="${NAMESPACE}" postgrescluster/original \ + 'postgres-operator.crunchydata.com/pgbackrest-restore=one' + + # TODO(benjaminjb): remove this when PG10 is no longer being supported + # For PG10, we need to run a patronictl reinit for the replica when that is running + # Get the replica name--the replica will exist during the PITR process so we don't need to wait + if [[ ${KUTTL_PG_VERSION} == 10 ]]; then + # Find replica + REPLICA=$(kubectl get pods --namespace "${NAMESPACE}" \ + --selector=' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/data=postgres, + postgres-operator.crunchydata.com/role!=master' \ + --output=jsonpath={.items..metadata.name}) + + # Wait for replica to be deleted + kubectl wait pod/"${REPLICA}" --namespace "${NAMESPACE}" --for=delete --timeout=-1s + + # Wait for the restarted replica to be started + NOT_RUNNING="" + while [[ "${NOT_RUNNING}" == "" ]]; do + kubectl get pods --namespace "${NAMESPACE}" "${REPLICA}" || (sleep 1 && continue) + + NOT_RUNNING=$(kubectl get pods --namespace "${NAMESPACE}" "${REPLICA}" \ + --output jsonpath="{.status.containerStatuses[?(@.name=='database')].state.running.startedAt}") + sleep 1 + done + + kubectl exec --namespace "${NAMESPACE}" "${REPLICA}" -- patronictl reinit original-ha "${REPLICA}" --force + fi diff --git a/testing/kuttl/e2e/pgbackrest-restore/15-assert.yaml b/testing/kuttl/e2e/pgbackrest-restore/15-assert.yaml new file mode 100644 index 0000000000..c408b75a60 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/15-assert.yaml @@ -0,0 +1,16 @@ +--- +# Wait for the restore to complete and the cluster to come online. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: original +status: + instances: + - name: '00' + replicas: 2 + readyReplicas: 2 + updatedReplicas: 2 + pgbackrest: + restore: + id: one + finished: true diff --git a/testing/kuttl/e2e/pgbackrest-restore/16--check-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/16--check-data.yaml new file mode 100644 index 0000000000..b0ae252831 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/16--check-data.yaml @@ -0,0 +1,100 @@ +--- +# Confirm that data was restored to the point-in-time. +apiVersion: batch/v1 +kind: Job +metadata: + name: original-pitr-primary-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: original-pguser-original, key: uri } } + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - -qa + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + DECLARE + restored jsonb; + BEGIN + SELECT jsonb_agg(important) INTO restored FROM important; + ASSERT restored = '[ + {"data":"treasure"}, {"data":"water"}, {"data":"socks"} + ]', format('got %L', restored); + END $$$$; + +--- +# Confirm that replicas are also restored and streaming from the primary. 
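While the point-in-time restore runs, the progress that the 15-assert waits on is also readable straight from the cluster status (a sketch):

    kubectl get postgrescluster/original --namespace "${NAMESPACE}" \
      --output jsonpath='{.status.pgbackrest.restore.id}{" "}{.status.pgbackrest.restore.finished}{"\n"}'
    # prints the restore id ("one" here) followed by "true" once the restore has finished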
+apiVersion: batch/v1 +kind: Job +metadata: + name: original-pitr-replica-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGPORT + valueFrom: { secretKeyRef: { name: original-pguser-original, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: original-pguser-original, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: original-pguser-original, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: original-pguser-original, key: password } } + + # The user secret does not contain the replica service. + - name: NAMESPACE + valueFrom: { fieldRef: { fieldPath: metadata.namespace } } + - name: PGHOST + value: "original-replicas.$(NAMESPACE).svc" + + # Do not wait indefinitely. + - { name: PGCONNECT_TIMEOUT, value: '5' } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - -qa + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + DECLARE + restored jsonb; + BEGIN + ASSERT pg_is_in_recovery(), 'expected replica'; + -- only users with "pg_read_all_settings" role may examine "primary_conninfo" + -- ASSERT current_setting('primary_conninfo') <> '', 'expected streaming'; + + SELECT jsonb_agg(important) INTO restored FROM important; + ASSERT restored = '[ + {"data":"treasure"}, {"data":"water"}, {"data":"socks"} + ]', format('got %L', restored); + END $$$$; diff --git a/testing/kuttl/e2e/pgbackrest-restore/16-assert.yaml b/testing/kuttl/e2e/pgbackrest-restore/16-assert.yaml new file mode 100644 index 0000000000..0baadef25b --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/16-assert.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: original-pitr-primary-data +status: + succeeded: 1 + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: original-pitr-replica-data +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbackrest-restore/17--check-replication.yaml b/testing/kuttl/e2e/pgbackrest-restore/17--check-replication.yaml new file mode 100644 index 0000000000..f6c813c8b1 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-restore/17--check-replication.yaml @@ -0,0 +1,22 @@ +--- +# Confirm that the replica is streaming from the primary. 
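The replica check above leaves the primary_conninfo assertion commented out because that setting is superuser-only: a regular login role can read it only when granted the pg_read_all_settings predefined role. If you wanted to re-enable that assertion for the application user, a sketch (run against the primary so the grant replicates to the standby):

    PRIMARY=$(kubectl get pod --namespace "${NAMESPACE}" --output name \
      --selector 'postgres-operator.crunchydata.com/cluster=original,postgres-operator.crunchydata.com/role=master')

    kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -qAt \
      --command 'GRANT pg_read_all_settings TO "original";'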
+apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + REPLICA=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=original, + postgres-operator.crunchydata.com/role=replica' + ) + + kubectl exec --stdin --namespace "${NAMESPACE}" "${REPLICA}" \ + -- psql -qb original --set ON_ERROR_STOP=1 \ + --file=- <<'SQL' + DO $$ + BEGIN + PERFORM * FROM pg_stat_wal_receiver WHERE status = 'streaming'; + ASSERT FOUND, 'expected streaming replication'; + END $$ + SQL diff --git a/testing/kuttl/e2e/pgbouncer/00--cluster.yaml b/testing/kuttl/e2e/pgbouncer/00--cluster.yaml new file mode 100644 index 0000000000..4699d90171 --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/00--cluster.yaml @@ -0,0 +1,19 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: proxied + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + proxy: + pgBouncer: + replicas: 1 + config: + # Set the pgBouncer verbosity level to debug to print connection logs + # --https://www.pgbouncer.org/config.html#log-settings + global: + verbose: '1' diff --git a/testing/kuttl/e2e/pgbouncer/00-assert.yaml b/testing/kuttl/e2e/pgbouncer/00-assert.yaml new file mode 100644 index 0000000000..6c3a33079f --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/00-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: proxied +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: proxied-pgbouncer diff --git a/testing/kuttl/e2e/pgbouncer/01--psql-connect.yaml b/testing/kuttl/e2e/pgbouncer/01--psql-connect.yaml new file mode 100644 index 0000000000..0f7099d4e8 --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/01--psql-connect.yaml @@ -0,0 +1,41 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 6 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + - psql + - -c + - "select version();" + env: + - name: PGSSLMODE + value: verify-full + - name: PGSSLROOTCERT + value: "/tmp/certs/ca.crt" + - name: PGHOST + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: password } } + volumeMounts: + - name: certs + mountPath: "/tmp/certs" + volumes: + - name: certs + secret: + secretName: proxied-cluster-cert diff --git a/testing/kuttl/e2e/pgbouncer/01-assert.yaml b/testing/kuttl/e2e/pgbouncer/01-assert.yaml new file mode 100644 index 0000000000..e4d8bbb37a --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/01-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbouncer/10--read-certificate.yaml 
b/testing/kuttl/e2e/pgbouncer/10--read-certificate.yaml new file mode 100644 index 0000000000..87739116ae --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/10--read-certificate.yaml @@ -0,0 +1,28 @@ +--- +# Print the certificate presented by PgBouncer. +apiVersion: batch/v1 +kind: Job +metadata: + name: read-cert-before + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 1 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: openssl + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-port } } + command: + - bash + - -ceu + - | + openssl s_client --connect '$(PGHOST):$(PGPORT)' --starttls postgres < /dev/null 2> /dev/null | + openssl x509 --noout --text diff --git a/testing/kuttl/e2e/pgbouncer/10-assert.yaml b/testing/kuttl/e2e/pgbouncer/10-assert.yaml new file mode 100644 index 0000000000..87d1a262fb --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/10-assert.yaml @@ -0,0 +1,8 @@ +--- +# Wait for the job to complete. +apiVersion: batch/v1 +kind: Job +metadata: + name: read-cert-before +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbouncer/11--open-connection.yaml b/testing/kuttl/e2e/pgbouncer/11--open-connection.yaml new file mode 100644 index 0000000000..f43c586e7f --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/11--open-connection.yaml @@ -0,0 +1,43 @@ +--- +# Connect through PgBouncer and wait long enough for TLS certificates to rotate. +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-open-connection + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 1 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + volumes: + # TODO(cbandy): Provide a CA bundle that clients can use for verification. + - { name: tls, secret: { secretName: proxied-cluster-cert } } + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # Connect through PgBouncer. + - name: PGURI + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-uri } } + + # Verify the certificate presented by PgBouncer. + - { name: PGSSLMODE, value: verify-full } + - { name: PGSSLROOTCERT, value: /mnt/ca.crt } + + volumeMounts: + - { name: tls, mountPath: /mnt } + + command: + - psql + - $(PGURI) + - -qAt + - --set=ON_ERROR_STOP=1 + + # Print connection details. + - --command=SELECT pid, backend_start FROM pg_stat_activity WHERE pid = pg_backend_pid(); + + # Wait here so later test steps can see this open connection. + - --command=SELECT pg_sleep_for('5 minutes'); diff --git a/testing/kuttl/e2e/pgbouncer/11-assert.yaml b/testing/kuttl/e2e/pgbouncer/11-assert.yaml new file mode 100644 index 0000000000..4c1f3a752d --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/11-assert.yaml @@ -0,0 +1,18 @@ +--- +# Wait for the job to start. +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-open-connection +status: + active: 1 + +--- +# Wait for the pod to start. 
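The read-cert jobs here and in step 13 capture the full certificate text, which step 14 later diffs. For a quicker before/after comparison by hand, the serial number and fingerprint are enough; a sketch using the same STARTTLS handshake, with PGHOST and PGPORT pointing at the PgBouncer service as in those jobs:

    openssl s_client --connect "${PGHOST}:${PGPORT}" --starttls postgres < /dev/null 2> /dev/null |
      openssl x509 --noout --serial --fingerprint --enddate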
+apiVersion: v1 +kind: Pod +metadata: + labels: + job-name: psql-open-connection +status: + phase: Running diff --git a/testing/kuttl/e2e/pgbouncer/12--rotate-certificate.yaml b/testing/kuttl/e2e/pgbouncer/12--rotate-certificate.yaml new file mode 100644 index 0000000000..67e8f31c84 --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/12--rotate-certificate.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + BEFORE=$(date -u +%FT%TZ) + + # Wipe out the stored PgBouncer certificate. + kubectl patch --namespace "${NAMESPACE}" secret/proxied-pgbouncer \ + --patch '{"data":{"pgbouncer-frontend.crt":""}}' + + # Wait for the certificate to be regenerated then loaded. + # Changing this from "wait until timeout" to "try X times" + # so that we can get the logs before exiting 1 in case we cannot find the reload. + for _ in $(seq 120); do + kubectl logs --namespace "${NAMESPACE}" deployment.apps/proxied-pgbouncer \ + --container pgbouncer-config --since-time "${BEFORE}" | grep 'Loaded' && \ + found=true && break + sleep 1 + done + + # This test has been flaky in the past, potentially around rotating/reloading the cert. + # To help debug, we set the pgBouncer verbosity to 1 (debug) and print the logs + kubectl logs --namespace "${NAMESPACE}" deployment.apps/proxied-pgbouncer \ + --all-containers --prefix --timestamps + + # If we haven't found the `Loaded` log statement, exit with an error + if [ -z "$found" ]; then + echo "pgbouncer-config has failed to reload in time" + exit 1; + fi diff --git a/testing/kuttl/e2e/pgbouncer/13--read-certificate.yaml b/testing/kuttl/e2e/pgbouncer/13--read-certificate.yaml new file mode 100644 index 0000000000..5134c75ab0 --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/13--read-certificate.yaml @@ -0,0 +1,28 @@ +--- +# Print the certificate presented by PgBouncer. +apiVersion: batch/v1 +kind: Job +metadata: + name: read-cert-after + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 1 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: openssl + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGHOST + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-host } } + - name: PGPORT + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-port } } + command: + - bash + - -ceu + - | + openssl s_client --connect '$(PGHOST):$(PGPORT)' --starttls postgres < /dev/null 2> /dev/null | + openssl x509 --noout --text diff --git a/testing/kuttl/e2e/pgbouncer/13-assert.yaml b/testing/kuttl/e2e/pgbouncer/13-assert.yaml new file mode 100644 index 0000000000..ca9eae62a0 --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/13-assert.yaml @@ -0,0 +1,8 @@ +--- +# Wait for the job to complete. +apiVersion: batch/v1 +kind: Job +metadata: + name: read-cert-after +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbouncer/14--compare-certificate.yaml b/testing/kuttl/e2e/pgbouncer/14--compare-certificate.yaml new file mode 100644 index 0000000000..4d60a4eb6e --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/14--compare-certificate.yaml @@ -0,0 +1,14 @@ +--- +# Confirm that PgBouncer is serving a new certificate. +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + bash -c '! diff -u \ + <(kubectl logs --namespace "${NAMESPACE}" job.batch/read-cert-before) \ + <(kubectl logs --namespace "${NAMESPACE}" job.batch/read-cert-after) \ + ' || { + echo 'Certificate did not change!' 
+ kubectl logs --namespace "${NAMESPACE}" job.batch/read-cert-after + exit 1 + } diff --git a/testing/kuttl/e2e/pgbouncer/15--check-connection.yaml b/testing/kuttl/e2e/pgbouncer/15--check-connection.yaml new file mode 100644 index 0000000000..6055dc4910 --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/15--check-connection.yaml @@ -0,0 +1,35 @@ +--- +# Confirm that the open connection is encrypted and remained open through rotation. +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + CONNECTION=$( + kubectl logs --namespace "${NAMESPACE}" job.batch/psql-open-connection + ) + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=proxied, + postgres-operator.crunchydata.com/role=master' + ) + + kubectl exec --stdin --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qb --set ON_ERROR_STOP=1 --set CONNECTION="${CONNECTION}" \ + --file=- <<'SQL' + SELECT + set_config('testing.pid', (string_to_array(:'CONNECTION', '|'))[1], false) AS "testing.pid", + set_config('testing.start', (string_to_array(:'CONNECTION', '|'))[2], false) AS "testing.start"; + + DO $$ + BEGIN + PERFORM * FROM pg_stat_ssl + WHERE ssl AND pid = current_setting('testing.pid')::integer; + ASSERT FOUND, 'expected TLS end-to-end'; + + PERFORM * FROM pg_stat_activity + WHERE pid = current_setting('testing.pid')::integer + AND backend_start = current_setting('testing.start')::timestamptz; + ASSERT FOUND, 'expected to stay connected'; + END $$; + SQL diff --git a/testing/kuttl/e2e/pgbouncer/16--reconnect.yaml b/testing/kuttl/e2e/pgbouncer/16--reconnect.yaml new file mode 100644 index 0000000000..e070430169 --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/16--reconnect.yaml @@ -0,0 +1,46 @@ +--- +# Verify the new PgBouncer certificate and transport encryption. +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-tls-after + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 1 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + volumes: + # TODO(cbandy): Provide a CA bundle that clients can use for verification. + - { name: tls, secret: { secretName: proxied-cluster-cert } } + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + # Connect through PgBouncer. + - name: PGURI + valueFrom: { secretKeyRef: { name: proxied-pguser-proxied, key: pgbouncer-uri } } + + # Verify the certificate presented by PgBouncer. + - { name: PGSSLMODE, value: verify-full } + - { name: PGSSLROOTCERT, value: /mnt/ca.crt } + + volumeMounts: + - { name: tls, mountPath: /mnt } + + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + command: + - psql + - $(PGURI) + - -qb + - --set=ON_ERROR_STOP=1 + - --command + - | + DO $$$$ + BEGIN + PERFORM * FROM pg_stat_ssl WHERE ssl AND pid = pg_backend_pid(); + ASSERT FOUND, 'expected TLS end-to-end'; + END $$$$; diff --git a/testing/kuttl/e2e/pgbouncer/16-assert.yaml b/testing/kuttl/e2e/pgbouncer/16-assert.yaml new file mode 100644 index 0000000000..b6fbbf95f2 --- /dev/null +++ b/testing/kuttl/e2e/pgbouncer/16-assert.yaml @@ -0,0 +1,8 @@ +--- +# Wait for the job to complete. 
+apiVersion: batch/v1 +kind: Job +metadata: + name: psql-tls-after +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/replica-read/00--cluster.yaml b/testing/kuttl/e2e/replica-read/00--cluster.yaml new file mode 100644 index 0000000000..c62f5418cd --- /dev/null +++ b/testing/kuttl/e2e/replica-read/00--cluster.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: replica-read +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + replicas: 2 diff --git a/testing/kuttl/e2e/replica-read/00-assert.yaml b/testing/kuttl/e2e/replica-read/00-assert.yaml new file mode 100644 index 0000000000..17c2942eb0 --- /dev/null +++ b/testing/kuttl/e2e/replica-read/00-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: replica-read +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +apiVersion: v1 +kind: Service +metadata: + name: replica-read-replicas diff --git a/testing/kuttl/e2e/replica-read/01--psql-replica-read.yaml b/testing/kuttl/e2e/replica-read/01--psql-replica-read.yaml new file mode 100644 index 0000000000..3d000aee85 --- /dev/null +++ b/testing/kuttl/e2e/replica-read/01--psql-replica-read.yaml @@ -0,0 +1,44 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-replica-read +spec: + backoffLimit: 6 + template: + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + command: + # https://www.postgresql.org/docs/current/plpgsql-errors-and-messages.html#PLPGSQL-STATEMENTS-ASSERT + # If run on a non-replica, this assertion fails, resulting in the pod erroring + # Note: the `$$$$` is reduced to `$$` by Kubernetes. + # - https://kubernetes.io/docs/tasks/inject-data-application/ + - psql + - -qc + - | + DO $$$$ + BEGIN + ASSERT pg_is_in_recovery(); + END $$$$; + env: + # The Replica svc is not held in the user secret, so we hard-code the Service address + # (using the downward API for the namespace) + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PGHOST + value: "replica-read-replicas.$(NAMESPACE).svc" + - name: PGPORT + valueFrom: { secretKeyRef: { name: replica-read-pguser-replica-read, key: port } } + - name: PGDATABASE + valueFrom: { secretKeyRef: { name: replica-read-pguser-replica-read, key: dbname } } + - name: PGUSER + valueFrom: { secretKeyRef: { name: replica-read-pguser-replica-read, key: user } } + - name: PGPASSWORD + valueFrom: { secretKeyRef: { name: replica-read-pguser-replica-read, key: password } } + + # Do not wait indefinitely.
+ - { name: PGCONNECT_TIMEOUT, value: '5' } diff --git a/testing/kuttl/e2e/replica-read/01-assert.yaml b/testing/kuttl/e2e/replica-read/01-assert.yaml new file mode 100644 index 0000000000..97ea0972c3 --- /dev/null +++ b/testing/kuttl/e2e/replica-read/01-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-replica-read +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml b/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml new file mode 100644 index 0000000000..2d23e1e3d3 --- /dev/null +++ b/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml @@ -0,0 +1,23 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner1 + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner2 + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/root-cert-ownership/00-assert.yaml b/testing/kuttl/e2e/root-cert-ownership/00-assert.yaml new file mode 100644 index 0000000000..406465b691 --- /dev/null +++ b/testing/kuttl/e2e/root-cert-ownership/00-assert.yaml @@ -0,0 +1,26 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner1 +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner2 +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Secret +metadata: + name: pgo-root-cacert diff --git a/testing/kuttl/e2e/root-cert-ownership/01--check-owners.yaml b/testing/kuttl/e2e/root-cert-ownership/01--check-owners.yaml new file mode 100644 index 0000000000..ea8353427c --- /dev/null +++ b/testing/kuttl/e2e/root-cert-ownership/01--check-owners.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + # Get a list of the current owners of the root ca cert secret and verify that + # both owners are listed. 
+ - script: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + while true; do + sleep 1 # this sleep allows time for the owner reference list to be updated + CURRENT_OWNERS=$(kubectl --namespace="${NAMESPACE}" get secret \ + pgo-root-cacert -o jsonpath='{.metadata.ownerReferences[*].name}') + # If owner1 and owner2 are both listed, exit successfully + if contains "${CURRENT_OWNERS}" "owner1" && contains "${CURRENT_OWNERS}" "owner2"; then + exit 0 + fi + done diff --git a/testing/kuttl/e2e/root-cert-ownership/02--delete-owner1.yaml b/testing/kuttl/e2e/root-cert-ownership/02--delete-owner1.yaml new file mode 100644 index 0000000000..14d9532d8d --- /dev/null +++ b/testing/kuttl/e2e/root-cert-ownership/02--delete-owner1.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: +- apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: owner1 diff --git a/testing/kuttl/e2e/root-cert-ownership/02-assert.yaml b/testing/kuttl/e2e/root-cert-ownership/02-assert.yaml new file mode 100644 index 0000000000..839f6a9b29 --- /dev/null +++ b/testing/kuttl/e2e/root-cert-ownership/02-assert.yaml @@ -0,0 +1,9 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner2 +--- +apiVersion: v1 +kind: Secret +metadata: + name: pgo-root-cacert diff --git a/testing/kuttl/e2e/root-cert-ownership/02-errors.yaml b/testing/kuttl/e2e/root-cert-ownership/02-errors.yaml new file mode 100644 index 0000000000..d8f159d59c --- /dev/null +++ b/testing/kuttl/e2e/root-cert-ownership/02-errors.yaml @@ -0,0 +1,4 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner1 diff --git a/testing/kuttl/e2e/root-cert-ownership/03--check-owners.yaml b/testing/kuttl/e2e/root-cert-ownership/03--check-owners.yaml new file mode 100644 index 0000000000..951f9fce68 --- /dev/null +++ b/testing/kuttl/e2e/root-cert-ownership/03--check-owners.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + # Get a list of the current owners of the root ca cert secret and verify that + # owner1 is no longer listed and owner2 is found. 
+ - script: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + while true; do + sleep 1 # this sleep allows time for the owner reference list to be updated + CURRENT_OWNERS=$(kubectl --namespace="${NAMESPACE}" get secret \ + pgo-root-cacert -o jsonpath='{.metadata.ownerReferences[*].name}') + # If owner1 is removed and owner2 is still listed, exit successfully + if !(contains "${CURRENT_OWNERS}" "owner1") && contains "${CURRENT_OWNERS}" "owner2"; then + exit 0 + fi + done diff --git a/testing/kuttl/e2e/root-cert-ownership/04--delete-owner2.yaml b/testing/kuttl/e2e/root-cert-ownership/04--delete-owner2.yaml new file mode 100644 index 0000000000..df1d55d3bb --- /dev/null +++ b/testing/kuttl/e2e/root-cert-ownership/04--delete-owner2.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: +- apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: owner2 diff --git a/testing/kuttl/e2e/root-cert-ownership/04-errors.yaml b/testing/kuttl/e2e/root-cert-ownership/04-errors.yaml new file mode 100644 index 0000000000..b117c4561b --- /dev/null +++ b/testing/kuttl/e2e/root-cert-ownership/04-errors.yaml @@ -0,0 +1,9 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner1 +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: owner2 diff --git a/testing/kuttl/e2e/root-cert-ownership/05--check-secret.yaml b/testing/kuttl/e2e/root-cert-ownership/05--check-secret.yaml new file mode 100644 index 0000000000..9c432f02b2 --- /dev/null +++ b/testing/kuttl/e2e/root-cert-ownership/05--check-secret.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + # If there are other PostgresClusters in the namespace, ensure that 'owner1' + # and 'owner2' are not listed. + # If there are no other PostgresClusters in the namespace, the 'pgo-root-cacert' + # secret should be deleted. + - script: | + NUM_CLUSTERS=$(kubectl --namespace="${NAMESPACE}" get postgrescluster --output name | wc -l) + echo "Found ${NUM_CLUSTERS} clusters" + if [ "$NUM_CLUSTERS" != 0 ]; then + # Continue checking until Kuttl times out + # If at least one owner is never removed the test fails + while true; do + sleep 5 # This sleep allows time for the owner reference list to be updated + CURRENT_OWNERS=$(kubectl --namespace="${NAMESPACE}" get secret \ + pgo-root-cacert -o jsonpath='{.metadata.ownerReferences[*].name}') + # If neither owner is listed, exit successfully + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + if ! contains "${CURRENT_OWNERS}" "owner1" && ! 
contains "${CURRENT_OWNERS}" "owner2"; then + exit 0 + fi + done + else + # Continue checking until Kuttl times out + # If the secret is never removed, the test fails + while true; do + sleep 5 # this sleep allows time for garbage collector to delete the secret + ROOT_SECRET=$(kubectl --namespace="${NAMESPACE}" get --ignore-not-found \ + secret pgo-root-cacert --output name | wc -l) + if [ "$ROOT_SECRET" = 0 ]; then + exit 0 + fi + done + fi diff --git a/testing/kuttl/e2e/root-cert-ownership/README.md b/testing/kuttl/e2e/root-cert-ownership/README.md new file mode 100644 index 0000000000..fe29596938 --- /dev/null +++ b/testing/kuttl/e2e/root-cert-ownership/README.md @@ -0,0 +1,23 @@ +### Root Certificate Ownership Test + +This Kuttl routine runs through the following steps: + +#### Create two clusters and verify the root certificate secret ownership + +- 00: Creates the two clusters and verifies they and the root cert secret exist +- 01: Check that the secret shows both clusters as owners + +#### Delete the first cluster and verify the root certificate secret ownership + +- 02: Delete the first cluster, assert that the second cluster and the root cert +secret are still present and that the first cluster is not present +- 03: Check that the secret shows the second cluster as an owner but does not show +the first cluster as an owner + +#### Delete the second cluster and verify the root certificate secret ownership + +- 04: Delete the second cluster, assert that both clusters are not present +- 05: Check the number of clusters in the namespace. If there are any remaining +clusters, ensure that the secret shows neither the first nor second cluster as an +owner. If there are no clusters remaining in the namespace, ensure the root cert +secret has been deleted. 
diff --git a/testing/kuttl/e2e/scaledown/00--create-cluster.yaml b/testing/kuttl/e2e/scaledown/00--create-cluster.yaml new file mode 100644 index 0000000000..50377c2fb6 --- /dev/null +++ b/testing/kuttl/e2e/scaledown/00--create-cluster.yaml @@ -0,0 +1,32 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + - name: instance2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/scaledown/00-assert.yaml b/testing/kuttl/e2e/scaledown/00-assert.yaml new file mode 100644 index 0000000000..b5fa5a9051 --- /dev/null +++ b/testing/kuttl/e2e/scaledown/00-assert.yaml @@ -0,0 +1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 + - name: instance2 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/installers/olm/config/examples/postgrescluster.example.yaml b/testing/kuttl/e2e/scaledown/01--update-cluster.yaml similarity index 60% rename from installers/olm/config/examples/postgrescluster.example.yaml rename to testing/kuttl/e2e/scaledown/01--update-cluster.yaml index dd0e3a0dd9..d6409a8fd1 100644 --- a/installers/olm/config/examples/postgrescluster.example.yaml +++ b/testing/kuttl/e2e/scaledown/01--update-cluster.yaml @@ -1,13 +1,14 @@ apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: - name: example + name: scaledown spec: - postgresVersion: 13 + postgresVersion: ${KUTTL_PG_VERSION} instances: - - replicas: 1 + - name: instance1 dataVolumeClaimSpec: - accessModes: [ReadWriteOnce] + accessModes: + - "ReadWriteOnce" resources: requests: storage: 1Gi diff --git a/testing/kuttl/e2e/scaledown/01-assert.yaml b/testing/kuttl/e2e/scaledown/01-assert.yaml new file mode 100644 index 0000000000..45bb0b6d04 --- /dev/null +++ b/testing/kuttl/e2e/scaledown/01-assert.yaml @@ -0,0 +1,10 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/kuttl/e2e/scaledown/02--delete-cluster.yaml b/testing/kuttl/e2e/scaledown/02--delete-cluster.yaml new file mode 100644 index 0000000000..fc23731cd3 --- /dev/null +++ b/testing/kuttl/e2e/scaledown/02--delete-cluster.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: scaledown diff --git a/testing/kuttl/e2e/scaledown/10--create-cluster.yaml b/testing/kuttl/e2e/scaledown/10--create-cluster.yaml new file mode 100644 index 0000000000..3847e588c0 --- /dev/null +++ b/testing/kuttl/e2e/scaledown/10--create-cluster.yaml @@ -0,0 +1,26 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown1 +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + 
backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/scaledown/10-assert.yaml b/testing/kuttl/e2e/scaledown/10-assert.yaml new file mode 100644 index 0000000000..cf8bcb461a --- /dev/null +++ b/testing/kuttl/e2e/scaledown/10-assert.yaml @@ -0,0 +1,30 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown1 +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: scaledown1 + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: master +status: + phase: Running +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: scaledown1 + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: replica +status: + phase: Running diff --git a/testing/kuttl/e2e/scaledown/11-annotate.yaml b/testing/kuttl/e2e/scaledown/11-annotate.yaml new file mode 100644 index 0000000000..a4bc743b3f --- /dev/null +++ b/testing/kuttl/e2e/scaledown/11-annotate.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + # Label instance pods with their current role. + - script: | + kubectl label --namespace="${NAMESPACE}" pods \ + --selector='postgres-operator.crunchydata.com/role=master' \ + 'testing/role-before=master' + - script: | + kubectl label --namespace="${NAMESPACE}" pods \ + --selector='postgres-operator.crunchydata.com/role=replica' \ + 'testing/role-before=replica' diff --git a/testing/kuttl/e2e/scaledown/12--update-cluster.yaml b/testing/kuttl/e2e/scaledown/12--update-cluster.yaml new file mode 100644 index 0000000000..3b4f62094a --- /dev/null +++ b/testing/kuttl/e2e/scaledown/12--update-cluster.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown1 +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/scaledown/12-assert.yaml b/testing/kuttl/e2e/scaledown/12-assert.yaml new file mode 100644 index 0000000000..079435b67d --- /dev/null +++ b/testing/kuttl/e2e/scaledown/12-assert.yaml @@ -0,0 +1,21 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown1 +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: scaledown1 + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: master + testing/role-before: master +status: + phase: Running diff --git a/testing/kuttl/e2e/scaledown/13--delete-cluster.yaml b/testing/kuttl/e2e/scaledown/13--delete-cluster.yaml new file mode 100644 index 0000000000..ddcdb20910 --- /dev/null +++ b/testing/kuttl/e2e/scaledown/13--delete-cluster.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: scaledown1 diff --git a/testing/kuttl/e2e/scaledown/20--create-cluster.yaml b/testing/kuttl/e2e/scaledown/20--create-cluster.yaml new 
file mode 100644 index 0000000000..796f88db3c --- /dev/null +++ b/testing/kuttl/e2e/scaledown/20--create-cluster.yaml @@ -0,0 +1,33 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown2 +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + - name: instance2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/scaledown/20-assert.yaml b/testing/kuttl/e2e/scaledown/20-assert.yaml new file mode 100644 index 0000000000..f65cef60b8 --- /dev/null +++ b/testing/kuttl/e2e/scaledown/20-assert.yaml @@ -0,0 +1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown2 +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 + - name: instance2 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/kuttl/e2e/scaledown/21--update-cluster.yaml b/testing/kuttl/e2e/scaledown/21--update-cluster.yaml new file mode 100644 index 0000000000..02d8936d0b --- /dev/null +++ b/testing/kuttl/e2e/scaledown/21--update-cluster.yaml @@ -0,0 +1,21 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown2 +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + - name: instance2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/scaledown/21-assert.yaml b/testing/kuttl/e2e/scaledown/21-assert.yaml new file mode 100644 index 0000000000..f137a616b8 --- /dev/null +++ b/testing/kuttl/e2e/scaledown/21-assert.yaml @@ -0,0 +1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: scaledown2 +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 + - name: instance2 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/kuttl/e2e/scaledown/readme.MD b/testing/kuttl/e2e/scaledown/readme.MD new file mode 100644 index 0000000000..44fd880ed1 --- /dev/null +++ b/testing/kuttl/e2e/scaledown/readme.MD @@ -0,0 +1,31 @@ +## Scaledown tests + +This is a KUTTL version of a previous `TestScaleDown` test that was prone to flaky behavior. +The KUTTL test captures the three test-cases enumerated in that test, and for ease of reading, +all three tests exist in this folder, which necessitates a clean-up step after tests one and two. +This test makes extensive use of `status.instances` to make sure that the expected instances +have the expected number of pods.
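For example, the counts that the assert files compare against can be read straight from the cluster status; a minimal sketch, assuming the third case's cluster (`scaledown2`) and the test namespace in `$NAMESPACE`:

```bash
# Print readyReplicas/replicas/updatedReplicas per instance set, as checked by the *-assert.yaml files.
kubectl --namespace="${NAMESPACE}" get postgrescluster scaledown2 \
  --output jsonpath='{.status.instances}'
```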
+ +### From two sets to one set + +* 00--create-cluster: create the cluster with two instance sets, one replica each +* 00-assert: check that the cluster exists with the expected status +* 01--update-cluster: update the cluster to remove one instance set +* 01-assert: check that the cluster exists with the expected status +* 02--delete-cluster + +### From one set with multiple replicas to one set with one replica + +* 10--create-cluster: create the cluster with one instance set with two replicas +* 10-assert: check that the cluster exists with the expected status +* 11-annotate: set the roles as labels on the pods +* 12--update-cluster: update the cluster to remove one replica +* 12-assert: check that the cluster exists with the expected status; and that the `master` pod that exists was the `master` before the scaledown +* 13--delete-cluster: delete the cluster + +### From two sets with variable replicas to two set with one replica each + +* 20--create-cluster: create the cluster with two instance sets, with two and one replica +* 20-assert: check that the cluster exists with the expected status +* 21--update-cluster: update the cluster to reduce the two-replica instance to one-replica +* 21-assert: check that the cluster exists with the expected status diff --git a/testing/kuttl/e2e/security-context/00--cluster.yaml b/testing/kuttl/e2e/security-context/00--cluster.yaml new file mode 100644 index 0000000000..5155eb4fc6 --- /dev/null +++ b/testing/kuttl/e2e/security-context/00--cluster.yaml @@ -0,0 +1,26 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: security-context + labels: { postgres-operator-test: kuttl } +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + proxy: + pgBouncer: + replicas: 1 + userInterface: + pgAdmin: + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } + monitoring: + pgmonitor: + exporter: {} diff --git a/testing/kuttl/e2e/security-context/00-assert.yaml b/testing/kuttl/e2e/security-context/00-assert.yaml new file mode 100644 index 0000000000..a6a5f48b6a --- /dev/null +++ b/testing/kuttl/e2e/security-context/00-assert.yaml @@ -0,0 +1,186 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: security-context +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: security-context + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 +--- +# initial pgBackRest backup +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: security-context + postgres-operator.crunchydata.com/pgbackrest: "" + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +spec: + containers: + - name: pgbackrest + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true +--- +# instance +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: security-context + 
postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/patroni: security-context-ha + postgres-operator.crunchydata.com/role: master +spec: + containers: + - name: database + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: replication-cert-copy + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: pgbackrest + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: pgbackrest-config + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: exporter + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + initContainers: + - name: postgres-startup + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: nss-wrapper-init + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true +--- +# pgAdmin +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: security-context + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + statefulset.kubernetes.io/pod-name: security-context-pgadmin-0 + name: security-context-pgadmin-0 +spec: + containers: + - name: pgadmin + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + initContainers: + - name: pgadmin-startup + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: nss-wrapper-init + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true +--- +# pgBouncer +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: security-context + postgres-operator.crunchydata.com/role: pgbouncer +spec: + containers: + - name: pgbouncer + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: pgbouncer-config + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true +--- +# pgBackRest repo +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: security-context + postgres-operator.crunchydata.com/data: pgbackrest + postgres-operator.crunchydata.com/pgbackrest: "" + postgres-operator.crunchydata.com/pgbackrest-dedicated: "" + statefulset.kubernetes.io/pod-name: security-context-repo-host-0 + name: security-context-repo-host-0 +spec: + containers: + - name: pgbackrest + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: pgbackrest-config + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + initContainers: + - name: pgbackrest-log-dir + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + - name: nss-wrapper-init + 
securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true diff --git a/testing/kuttl/e2e/security-context/01--security-context.yaml b/testing/kuttl/e2e/security-context/01--security-context.yaml new file mode 100644 index 0000000000..a8dd098697 --- /dev/null +++ b/testing/kuttl/e2e/security-context/01--security-context.yaml @@ -0,0 +1,48 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + # Check that every container has the correct capabilities. + + # Capture every container name alongside its list of dropped capabilities. + CONTAINERS_DROP_CAPS=$( + kubectl --namespace "${NAMESPACE}" get pods --output "jsonpath={\ + range .items[*].spec.containers[*]\ + }{ @.name }{'\t\t'}{ @.securityContext.capabilities.drop }{'\n'}{\ + end\ + }" + ) || exit + + WRONG=$( ! echo "${CONTAINERS_DROP_CAPS}" | grep -Fv '"ALL"' ) || { + echo 'Not all containers have dropped "ALL" capabilities!' + echo "${WRONG}" + exit 1 + } + + - script: | + # Check that every Pod is assigned to the "restricted" SecurityContextConstraint + # in OpenShift. + + SCC=$( + kubectl api-resources --cached | + grep -F 'security.openshift.io/v1' | + grep -F 'SecurityContextConstraint' + ) + + # Skip this check when the API has no notion of SecurityContextConstraint. + [ -z "${SCC}" ] && exit + + PODS_SCC=$( + kubectl --namespace "${NAMESPACE}" get pods --no-headers \ + --output "custom-columns=\ + NAME:.metadata.name,\ + SCC:.metadata.annotations['openshift\.io/scc']\ + " + ) || exit + + WRONG=$( ! echo "${PODS_SCC}" | grep -Ev -e '\ policies.yaml + kyverno apply --cluster --namespace "${NAMESPACE}" policies.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/00--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/00--create-cluster.yaml new file mode 100644 index 0000000000..c86a544166 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/00--create-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-cluster.yaml +assert: +- files/00-cluster-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/01--user-schema.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/01--user-schema.yaml new file mode 100644 index 0000000000..bbddba56c2 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/01--user-schema.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +# ensure the user schema is created for pgAdmin to use + - script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=elephant, + postgres-operator.crunchydata.com/role=master' + ) + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ + -- psql -qAt -d elephant --command 'CREATE SCHEMA elephant AUTHORIZATION elephant' diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/02--create-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/02--create-pgadmin.yaml new file mode 100644 index 0000000000..0ef15853af --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/02--create-pgadmin.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/02-pgadmin.yaml +assert: +- files/02-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/03-assert.yaml new file mode 100644 index 0000000000..6a25871f63 --- /dev/null +++ 
b/testing/kuttl/e2e/standalone-pgadmin-db-uri/03-assert.yaml @@ -0,0 +1,21 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=elephant, + postgres-operator.crunchydata.com/role=master' + ) + + NUM_USERS=$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- \ + psql -qAt -d elephant --command 'select count(*) from elephant.user' \ + ) + + if [[ ${NUM_USERS} != 1 ]]; then + echo >&2 'Expected 1 user' + echo "got ${NUM_USERS}" + exit 1 + fi diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/04--update-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/04--update-pgadmin.yaml new file mode 100644 index 0000000000..f8aaf480fd --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/04--update-pgadmin.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/04-pgadmin.yaml +assert: +- files/04-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/05-assert.yaml new file mode 100644 index 0000000000..4d31c5db18 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/05-assert.yaml @@ -0,0 +1,36 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +# timeout: 120 +commands: +- script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=elephant, + postgres-operator.crunchydata.com/role=master' + ) + + NUM_USERS=$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- \ + psql -qAt -d elephant --command 'select count(*) from elephant.user' \ + ) + + if [[ ${NUM_USERS} != 2 ]]; then + echo >&2 'Expected 2 user' + echo "got ${NUM_USERS}" + exit 1 + fi + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + USER_LIST=$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- \ + psql -qAt -d elephant --command 'select email from elephant.user;' \ + ) + + { + contains "${USER_LIST}" "john.doe@example.com" + } || { + echo >&2 'User john.doe@example.com not found. Got:' + echo "${USER_LIST}" + exit 1 + } diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/README.md b/testing/kuttl/e2e/standalone-pgadmin-db-uri/README.md new file mode 100644 index 0000000000..2d7688ae3b --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/README.md @@ -0,0 +1,26 @@ +# pgAdmin external database tests + +Notes: +- Due to the (random) namespace being part of the host, we cannot check the configmap using the usual assert/file pattern. +- These tests will only work with pgAdmin version v8 and higher + +## create postgrescluster and add user schema +* 00: + * create a postgrescluster with a label; + * check that the cluster has the label and that the expected user secret is created. +* 01: + * create the user schema for pgAdmin to use + + ## create pgadmin and verify connection to database +* 02: + * create a pgadmin with a selector for the existing cluster's label; + * check the correct existence of the secret, configmap, and pod. +* 03: + * check that pgAdmin only has one user + + ## add a pgadmin user and verify it in the database +* 04: + * update pgadmin with a new user; + * check that the pod is still running as expected. +* 05: + * check that pgAdmin now has two users and that the defined user is present. 
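The user counts asserted in steps 03 and 05 are read from the external configuration database itself; a minimal sketch of that query, using the same primary-pod selector as the assert scripts:

```bash
# Count the pgAdmin users stored in the external "elephant" database (see configDatabaseURI).
PRIMARY=$(kubectl get pod --namespace "${NAMESPACE}" --output name \
  --selector 'postgres-operator.crunchydata.com/cluster=elephant,postgres-operator.crunchydata.com/role=master')
kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- \
  psql -qAt -d elephant --command 'select count(*) from elephant.user'
```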
diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml new file mode 100644 index 0000000000..8ae250152f --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml @@ -0,0 +1,31 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: elephant + labels: + sometest: test1 +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/cluster: elephant + postgres-operator.crunchydata.com/pguser: elephant + postgres-operator.crunchydata.com/role: pguser +type: Opaque +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: elephant + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: master +status: + phase: Running diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml new file mode 100644 index 0000000000..5f8678e5e9 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml @@ -0,0 +1,11 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: elephant + labels: + sometest: test1 +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml new file mode 100644 index 0000000000..6457b2ca20 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin1 +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin1 +type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin.yaml new file mode 100644 index 0000000000..f1e251b949 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin.yaml @@ -0,0 +1,20 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin1 +spec: + config: + configDatabaseURI: + name: elephant-pguser-elephant + key: uri + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: + - name: kuttl-test + postgresClusterSelector: + matchLabels: + sometest: test1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml new file mode 100644 index 0000000000..3a3f459441 --- /dev/null +++ 
b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin1 +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin.yaml new file mode 100644 index 0000000000..2c62b58b4b --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin.yaml @@ -0,0 +1,33 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin1 +spec: + users: + - username: "john.doe@example.com" + passwordRef: + name: john-doe-password + key: password + config: + configDatabaseURI: + name: elephant-pguser-elephant + key: uri + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: + - name: kuttl-test + postgresClusterSelector: + matchLabels: + sometest: test1 +--- +apiVersion: v1 +kind: Secret +metadata: + name: john-doe-password +type: Opaque +stringData: + password: password diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/00--pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/00--pgadmin.yaml new file mode 100644 index 0000000000..9372467a93 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/00--pgadmin.yaml @@ -0,0 +1,12 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serviceName: pgadmin-service diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/00-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/00-assert.yaml new file mode 100644 index 0000000000..758814cad2 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/00-assert.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: pgadmin-service + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + controller: true + kind: PGAdmin + name: pgadmin +spec: + selector: + postgres-operator.crunchydata.com/pgadmin: pgadmin + ports: + - port: 5050 + targetPort: 5050 + protocol: TCP + name: pgadmin-port + type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/01--update-service.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/01--update-service.yaml new file mode 100644 index 0000000000..81db248fd4 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/01--update-service.yaml @@ -0,0 +1,12 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serviceName: pgadmin-service-updated diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/01-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/01-assert.yaml new file mode 100644 index 0000000000..2303ebe9bb --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/01-assert.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: pgadmin-service-updated + labels: + postgres-operator.crunchydata.com/role: pgadmin + 
postgres-operator.crunchydata.com/pgadmin: pgadmin +spec: + selector: + postgres-operator.crunchydata.com/pgadmin: pgadmin + ports: + - port: 5050 + targetPort: 5050 + protocol: TCP + name: pgadmin-port + type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/02--remove-service.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/02--remove-service.yaml new file mode 100644 index 0000000000..b8cbf4eb41 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/02--remove-service.yaml @@ -0,0 +1,11 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/02-errors.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/02-errors.yaml new file mode 100644 index 0000000000..f2795c106d --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/02-errors.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: pgadmin-service + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +spec: + selector: + postgres-operator.crunchydata.com/pgadmin: pgadmin + ports: + - port: 5050 + targetPort: 5050 + protocol: TCP + name: pgadmin-port + type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/10--manual-service.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/10--manual-service.yaml new file mode 100644 index 0000000000..88d8da6718 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/10--manual-service.yaml @@ -0,0 +1,29 @@ +# Manually create a service that should be taken over by pgAdmin +# The manual service is of type LoadBalancer +# Once taken over, the type should change to ClusterIP +apiVersion: v1 +kind: Service +metadata: + name: manual-pgadmin-service +spec: + ports: + - name: pgadmin-port + port: 5050 + protocol: TCP + selector: + postgres-operator.crunchydata.com/pgadmin: rhino + type: LoadBalancer +--- +# Create a pgAdmin that points to an existing un-owned service +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: manual-svc-pgadmin +spec: + serviceName: manual-pgadmin-service + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/10-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/10-assert.yaml new file mode 100644 index 0000000000..95bf241b16 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/10-assert.yaml @@ -0,0 +1,22 @@ +# Check that the manually created service has the correct ownerReference +apiVersion: v1 +kind: Service +metadata: + name: manual-pgadmin-service + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: manual-svc-pgadmin + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + controller: true + kind: PGAdmin + name: manual-svc-pgadmin +spec: + selector: + postgres-operator.crunchydata.com/pgadmin: manual-svc-pgadmin + ports: + - port: 5050 + targetPort: 5050 + protocol: TCP + name: pgadmin-port + type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/20--owned-service.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/20--owned-service.yaml new file mode 100644 index 0000000000..04f211ffc7 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/20--owned-service.yaml @@ -0,0 
+1,13 @@ +# Create a pgAdmin that will create and own a service +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin-service-owner +spec: + serviceName: pgadmin-owned-service + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/20-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/20-assert.yaml new file mode 100644 index 0000000000..a6ab1653bb --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/20-assert.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: pgadmin-owned-service + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin-service-owner + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + controller: true + kind: PGAdmin + name: pgadmin-service-owner +spec: + selector: + postgres-operator.crunchydata.com/pgadmin: pgadmin-service-owner + ports: + - port: 5050 + targetPort: 5050 + protocol: TCP + name: pgadmin-port + type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/21--service-takeover-fails.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/21--service-takeover-fails.yaml new file mode 100644 index 0000000000..f992521ce8 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/21--service-takeover-fails.yaml @@ -0,0 +1,13 @@ +# Create a second pgAdmin that attempts to steal the service +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin-service-thief +spec: + serviceName: pgadmin-owned-service + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/21-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/21-assert.yaml new file mode 100644 index 0000000000..060d669987 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/21-assert.yaml @@ -0,0 +1,35 @@ +# Original service should still have owner reference +apiVersion: v1 +kind: Service +metadata: + name: pgadmin-owned-service + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin-service-owner + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + controller: true + kind: PGAdmin + name: pgadmin-service-owner +spec: + selector: + postgres-operator.crunchydata.com/pgadmin: pgadmin-service-owner + ports: + - port: 5050 + targetPort: 5050 + protocol: TCP + name: pgadmin-port + type: ClusterIP +--- +# An event should be created for the failure to reconcile the Service +apiVersion: v1 +involvedObject: + apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PGAdmin + name: pgadmin-service-thief +kind: Event +message: 'Failed to reconcile Service ServiceName: pgadmin-owned-service' +reason: InvalidServiceWarning +source: + component: pgadmin-controller +type: Warning diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/00--create-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/00--create-pgadmin.yaml new file mode 100644 index 0000000000..ee1a03ec64 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/00--create-pgadmin.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-pgadmin.yaml +assert: +- files/00-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml 
b/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml new file mode 100644 index 0000000000..244533b7ee --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml @@ -0,0 +1,26 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# When setup.py returns users in Json, the Role translation is 1 for Admin, 2 for User +- script: | + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") + + bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + + [ $bob_role = 1 ] && [ $dave_role = 2 ] || exit 1 + + users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) + + bob_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="bob@example.com") | .isAdmin') + dave_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="dave@example.com") | .isAdmin') + + $bob_is_admin && ! $dave_is_admin || exit 1 + + bob_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="bob@example.com") | .password') + dave_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="dave@example.com") | .password') + + [ "$bob_password" = "password123" ] && [ "$dave_password" = "password456" ] || exit 1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml new file mode 100644 index 0000000000..0ef15853af --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/02-pgadmin.yaml +assert: +- files/02-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml new file mode 100644 index 0000000000..01aff25b3b --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml @@ -0,0 +1,29 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# When setup.py returns users in Json, the Role translation is 1 for Admin, 2 for User +- script: | + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") + + bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + + [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + + users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 
'go-template={{index .data "users.json" }}' | base64 -d) + + bob_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="bob@example.com") | .isAdmin') + dave_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="dave@example.com") | .isAdmin') + jimi_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="jimi@example.com") | .isAdmin') + + $bob_is_admin && $dave_is_admin && ! $jimi_is_admin || exit 1 + + bob_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="bob@example.com") | .password') + dave_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="dave@example.com") | .password') + jimi_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="jimi@example.com") | .password') + + [ "$bob_password" = "password123" ] && [ "$dave_password" = "password456" ] && [ "$jimi_password" = "password789" ] || exit 1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml new file mode 100644 index 0000000000..f8aaf480fd --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/04-pgadmin.yaml +assert: +- files/04-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml new file mode 100644 index 0000000000..1dca13a7b7 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml @@ -0,0 +1,29 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# When setup.py returns users in Json, the Role translation is 1 for Admin, 2 for User +- script: | + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") + + bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + + [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + + users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) + + bob_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="bob@example.com") | .isAdmin') + dave_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="dave@example.com") | .isAdmin') + jimi_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="jimi@example.com") | .isAdmin') + + $bob_is_admin && $dave_is_admin && ! 
$jimi_is_admin || exit 1 + + bob_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="bob@example.com") | .password') + dave_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="dave@example.com") | .password') + jimi_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="jimi@example.com") | .password') + + [ "$bob_password" = "NEWpassword123" ] && [ "$dave_password" = "NEWpassword456" ] && [ "$jimi_password" = "NEWpassword789" ] || exit 1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml new file mode 100644 index 0000000000..a538b7dca4 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/06-pgadmin.yaml +assert: +- files/06-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml new file mode 100644 index 0000000000..5c0e7267e6 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml @@ -0,0 +1,19 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# When setup.py returns users in Json, the Role translation is 1 for Admin, 2 for User +- script: | + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") + + bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + + [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + + users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) + + $(printf '%s\n' $users_in_secret | jq '. 
== []') || exit 1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/README.md b/testing/kuttl/e2e/standalone-pgadmin-user-management/README.md new file mode 100644 index 0000000000..0bbdfc2893 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/README.md @@ -0,0 +1,21 @@ +# pgAdmin User Management tests + +*Note: These tests will only work with pgAdmin version v8 and higher* + +## Create pgAdmin with users + +* Start pgAdmin with a couple users +* Ensure users exist in pgAdmin with correct settings +* Ensure users exist in the `users.json` file in the pgAdmin secret with the correct settings + +## Edit pgAdmin users + +* Add a user and edit an existing user +* Ensure users exist in pgAdmin with correct settings +* Ensure users exist in the `users.json` file in the pgAdmin secret with the correct settings + +## Delete pgAdmin users + +* Remove users from pgAdmin spec +* Ensure users still exist in pgAdmin with correct settings +* Ensure users have been removed from the `users.json` file in the pgAdmin secret diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml new file mode 100644 index 0000000000..f2c7f28cd1 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: bob-password-secret +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: dave-password-secret +type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin.yaml new file mode 100644 index 0000000000..ce86d8d894 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin.yaml @@ -0,0 +1,40 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: [] + users: + - username: bob@example.com + role: Administrator + passwordRef: + name: bob-password-secret + key: password + - username: dave@example.com + passwordRef: + name: dave-password-secret + key: password +--- +apiVersion: v1 +kind: Secret +metadata: + name: bob-password-secret +type: Opaque +data: + # Password is "password123", base64 encoded + password: cGFzc3dvcmQxMjM= +--- +apiVersion: v1 +kind: Secret +metadata: + name: dave-password-secret +type: Opaque +data: + # Password is "password456", base64 encoded + password: cGFzc3dvcmQ0NTY= diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml new file mode 100644 index 0000000000..9a07b0d994 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + labels: 
+ postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: bob-password-secret +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: dave-password-secret +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: jimi-password-secret +type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin.yaml new file mode 100644 index 0000000000..88f75d8092 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin.yaml @@ -0,0 +1,54 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: [] + users: + - username: bob@example.com + role: Administrator + passwordRef: + name: bob-password-secret + key: password + - username: dave@example.com + role: Administrator + passwordRef: + name: dave-password-secret + key: password + - username: jimi@example.com + passwordRef: + name: jimi-password-secret + key: password +--- +apiVersion: v1 +kind: Secret +metadata: + name: bob-password-secret +type: Opaque +data: + # Password is "password123", base64 encoded + password: cGFzc3dvcmQxMjM= +--- +apiVersion: v1 +kind: Secret +metadata: + name: dave-password-secret +type: Opaque +data: + # Password is "password456", base64 encoded + password: cGFzc3dvcmQ0NTY= +--- +apiVersion: v1 +kind: Secret +metadata: + name: jimi-password-secret +type: Opaque +data: + # Password is "password789", base64 encoded + password: cGFzc3dvcmQ3ODk= diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml new file mode 100644 index 0000000000..9a07b0d994 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: bob-password-secret +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: dave-password-secret +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: jimi-password-secret +type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml new file mode 100644 index 0000000000..32b0081f92 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml @@ -0,0 +1,54 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin 
+metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: [] + users: + - username: bob@example.com + role: Administrator + passwordRef: + name: bob-password-secret + key: password + - username: dave@example.com + role: Administrator + passwordRef: + name: dave-password-secret + key: password + - username: jimi@example.com + passwordRef: + name: jimi-password-secret + key: password +--- +apiVersion: v1 +kind: Secret +metadata: + name: bob-password-secret +type: Opaque +data: + # Password is "NEWpassword123", base64 encoded + password: TkVXcGFzc3dvcmQxMjM= +--- +apiVersion: v1 +kind: Secret +metadata: + name: dave-password-secret +type: Opaque +data: + # Password is "NEWpassword456", base64 encoded + password: TkVXcGFzc3dvcmQ0NTY= +--- +apiVersion: v1 +kind: Secret +metadata: + name: jimi-password-secret +type: Opaque +data: + # Password is "NEWpassword789", base64 encoded + password: TkVXcGFzc3dvcmQ3ODk= diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml new file mode 100644 index 0000000000..04481fb4d1 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin.yaml new file mode 100644 index 0000000000..0513edf050 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin.yaml @@ -0,0 +1,13 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: [] + users: [] diff --git a/testing/kuttl/e2e/standalone-pgadmin/00--create-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin/00--create-pgadmin.yaml new file mode 100644 index 0000000000..ee1a03ec64 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/00--create-pgadmin.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-pgadmin.yaml +assert: +- files/00-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin/00-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/00-assert.yaml new file mode 100644 index 0000000000..5b95b46964 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/00-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/pgadmin=pgadmin +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/pgadmin=pgadmin diff --git a/testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml new file mode 100644 index 0000000000..6b7c8c8794 --- /dev/null +++ 
b/testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml @@ -0,0 +1,17 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + + clusters_expected="\"Servers\": {}" + { + contains "${clusters_actual}" "${clusters_expected}" + } || { + echo "Wrong servers dumped: got ${clusters_actual}" + exit 1 + } diff --git a/testing/kuttl/e2e/standalone-pgadmin/02--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/02--create-cluster.yaml new file mode 100644 index 0000000000..bee91ce0a4 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/02--create-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/02-cluster.yaml +- files/02-pgadmin.yaml +assert: +- files/02-cluster-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml new file mode 100644 index 0000000000..169a8261eb --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml @@ -0,0 +1,76 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +# Check the configmap is updated; +# Check the file is updated on the pod; +# Check the server dump is accurate. +# Because we have to wait for the configmap reload, make sure we have enough time. +timeout: 120 +commands: +- script: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } + + data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin1\"\n }\n }\n}\n"' + + data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) + + { + contains "${data_actual}" "${data_expected}" + } || { + echo "Wrong configmap: got ${data_actual}" + exit 1 + } + + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') + config_expected='"Servers": { + "1": { + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin1", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin1" + } + }' + { + contains "${config_updated}" "${config_expected}" + } || { + echo "Wrong file mounted: got ${config_updated}" + echo "Wrong file mounted: expected ${config_expected}" + sleep 10 + exit 1 + } + + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + + clusters_expected=' + { + "Servers": { + "1": { + "Name": "pgadmin1", + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": 
"postgres", + "Username": "pgadmin1", + "Shared": true, + "TunnelPort": "22", + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + } + } + }' + { + contains "${clusters_actual}" "${clusters_expected}" + } || { + echo "Wrong servers dumped: got ${clusters_actual}" + echo "Wrong servers dumped: expected ${clusters_expected}" + diff_comp "${clusters_actual}" "${clusters_expected}" + exit 1 + } diff --git a/testing/kuttl/e2e/standalone-pgadmin/04--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/04--create-cluster.yaml new file mode 100644 index 0000000000..5701678501 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/04--create-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/04-cluster.yaml +assert: +- files/04-cluster-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml new file mode 100644 index 0000000000..7fe5b69dc2 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml @@ -0,0 +1,102 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +# Check the configmap is updated; +# Check the file is updated on the pod; +# Check the server dump is accurate. +# Because we have to wait for the configmap reload, make sure we have enough time. +timeout: 120 +commands: +- script: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } + + data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin1\"\n },\n \"2\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin2-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin2\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin2\"\n }\n }\n}\n"' + + data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) + + { + contains "${data_actual}" "${data_expected}" + } || { + echo "Wrong configmap: got ${data_actual}" + diff_comp "${data_actual}" "${data_expected}" + exit 1 + } + + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') + config_expected='"Servers": { + "1": { + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin1", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin1" + }, + "2": { + "Group": "groupOne", + "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin2", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin2" + } + }' + { + contains "${config_updated}" "${config_expected}" + } || { + echo "Wrong file mounted: got ${config_updated}" + echo "Wrong file mounted: expected ${config_expected}" + diff_comp "${config_updated}" "${config_expected}" + sleep 10 + exit 1 + } + + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers 
/tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + + clusters_expected=' + { + "Servers": { + "1": { + "Name": "pgadmin1", + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin1", + "Shared": true, + "TunnelPort": "22", + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + }, + "2": { + "Name": "pgadmin2", + "Group": "groupOne", + "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin2", + "Shared": true, + "TunnelPort": "22", + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + } + } + }' + { + contains "${clusters_actual}" "${clusters_expected}" + } || { + echo "Wrong servers dumped: got ${clusters_actual}" + echo "Wrong servers dumped: expected ${clusters_expected}" + diff_comp "${clusters_actual}" "${clusters_expected}" + exit 1 + } diff --git a/testing/kuttl/e2e/standalone-pgadmin/06--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/06--create-cluster.yaml new file mode 100644 index 0000000000..86b5f8bf04 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/06--create-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/06-cluster.yaml +- files/06-pgadmin.yaml +assert: +- files/06-cluster-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml new file mode 100644 index 0000000000..323237cad4 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml @@ -0,0 +1,126 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +# Check the configmap is updated; +# Check the file is updated on the pod; +# Check the server dump is accurate. +# Because we have to wait for the configmap reload, make sure we have enough time. 
+timeout: 120 +commands: +- script: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } + + data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin1\"\n },\n \"2\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin2-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin2\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin2\"\n },\n \"3\": {\n \"Group\": \"groupTwo\",\n \"Host\": \"pgadmin3-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin3\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin3\"\n }\n }\n}\n"' + + data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) + + { + contains "${data_actual}" "${data_expected}" + } || { + echo "Wrong configmap: got ${data_actual}" + diff_comp "${data_actual}" "${data_expected}" + exit 1 + } + + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') + config_expected='"Servers": { + "1": { + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin1", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin1" + }, + "2": { + "Group": "groupOne", + "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin2", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin2" + }, + "3": { + "Group": "groupTwo", + "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin3", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin3" + } + }' + { + contains "${config_updated}" "${config_expected}" + } || { + echo "Wrong file mounted: got ${config_updated}" + echo "Wrong file mounted: expected ${config_expected}" + diff_comp "${config_updated}" "${config_expected}" + sleep 10 + exit 1 + } + + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + + clusters_expected=' + { + "Servers": { + "1": { + "Name": "pgadmin1", + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin1", + "Shared": true, + "TunnelPort": "22", + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + }, + "2": { + "Name": "pgadmin2", + "Group": "groupOne", + "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin2", + "Shared": true, + "TunnelPort": "22", + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + }, + "3": { + "Name": "pgadmin3", + "Group": "groupTwo", + "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": 
"postgres", + "Username": "pgadmin3", + "Shared": true, + "TunnelPort": "22", + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + } + } + }' + { + contains "${clusters_actual}" "${clusters_expected}" + } || { + echo "Wrong servers dumped: got ${clusters_actual}" + echo "Wrong servers dumped: expected ${clusters_expected}" + diff_comp "${clusters_actual}" "${clusters_expected}" + exit 1 + } diff --git a/testing/kuttl/e2e/standalone-pgadmin/08--delete-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/08--delete-cluster.yaml new file mode 100644 index 0000000000..bc11ea62f4 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/08--delete-cluster.yaml @@ -0,0 +1,8 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + kind: PostgresCluster + name: pgadmin2 +error: +- files/04-cluster-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml new file mode 100644 index 0000000000..eca5581cb7 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml @@ -0,0 +1,102 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +# Check the configmap is updated; +# Check the file is updated on the pod; +# Check the server dump is accurate. +# Because we have to wait for the configmap reload, make sure we have enough time. +timeout: 120 +commands: +- script: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } + + data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin1\"\n },\n \"2\": {\n \"Group\": \"groupTwo\",\n \"Host\": \"pgadmin3-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin3\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin3\"\n }\n }\n}\n"' + + data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) + + { + contains "${data_actual}" "${data_expected}" + } || { + echo "Wrong configmap: got ${data_actual}" + diff_comp "${data_actual}" "${data_expected}" + exit 1 + } + + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') + config_expected='"Servers": { + "1": { + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin1", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin1" + }, + "2": { + "Group": "groupTwo", + "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", + "MaintenanceDB": "postgres", + "Name": "pgadmin3", + "Port": 5432, + "SSLMode": "prefer", + "Shared": true, + "Username": "pgadmin3" + } + }' + { + contains "${config_updated}" "${config_expected}" + } || { + echo "Wrong file mounted: got ${config_updated}" + echo "Wrong file mounted: expected ${config_expected}" + diff_comp "${config_updated}" "${config_expected}" + sleep 10 + exit 1 + } + + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 
/usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + + clusters_expected=' + { + "Servers": { + "1": { + "Name": "pgadmin1", + "Group": "groupOne", + "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin1", + "Shared": true, + "TunnelPort": "22", + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + }, + "2": { + "Name": "pgadmin3", + "Group": "groupTwo", + "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", + "Port": 5432, + "MaintenanceDB": "postgres", + "Username": "pgadmin3", + "Shared": true, + "TunnelPort": "22", + "KerberosAuthentication": false, + "ConnectionParameters": { + "sslmode": "prefer" + } + } + } + }' + { + contains "${clusters_actual}" "${clusters_expected}" + } || { + echo "Wrong servers dumped: got ${clusters_actual}" + echo "Wrong servers dumped: expected ${clusters_expected}" + diff_comp "${clusters_actual}" "${clusters_expected}" + exit 1 + } diff --git a/testing/kuttl/e2e/standalone-pgadmin/10-invalid-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin/10-invalid-pgadmin.yaml new file mode 100644 index 0000000000..118b8d06ef --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/10-invalid-pgadmin.yaml @@ -0,0 +1,37 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +# Check that invalid spec cannot be applied. +commands: +- script: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } + + data_expected='"pgadmin2" is invalid: spec.serverGroups[0]: Invalid value: "object": exactly one of "postgresClusterName" or "postgresClusterSelector" is required' + + data_actual=$(kubectl apply -f - 2>&1 < /pgwal/pgbackrest-spool" || exit 1 diff --git a/testing/kuttl/e2e/wal-pvc-pgupgrade/06-assert.yaml b/testing/kuttl/e2e/wal-pvc-pgupgrade/06-assert.yaml new file mode 100644 index 0000000000..f7575212e0 --- /dev/null +++ b/testing/kuttl/e2e/wal-pvc-pgupgrade/06-assert.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: wal-pvc-pgupgrade-after +status: + succeeded: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: wal-pvc-pgupgrade-after-replica +status: + succeeded: 1 diff --git a/testing/kuttl/kuttl-test.yaml b/testing/kuttl/kuttl-test.yaml new file mode 100644 index 0000000000..6733707507 --- /dev/null +++ b/testing/kuttl/kuttl-test.yaml @@ -0,0 +1,14 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestSuite +testDirs: +- testing/kuttl/e2e-generated/ +timeout: 300 +parallel: 2 +# by default kuttl will run in a generated namespace to override +# that functionality simply uncomment the line below and replace +# postgres-operator with the desired namespace to run in. +# namespace: postgres-operator +# By default kuttl deletes the resources created during a test. +# For debugging, it may be helpful to uncomment the following line +# in order to inspect the resources. 
+# skipDelete: true diff --git a/testing/kuttl/scripts/pgbackrest-initialization.sh b/testing/kuttl/scripts/pgbackrest-initialization.sh new file mode 100755 index 0000000000..ba6cd4a7e5 --- /dev/null +++ b/testing/kuttl/scripts/pgbackrest-initialization.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +EXPECTED_STATUS=$1 +EXPECTED_NUM_BACKUPS=$2 + +CLUSTER=${CLUSTER:-default} + +INFO=$(kubectl -n "${NAMESPACE}" exec "statefulset.apps/${CLUSTER}-repo-host" -c pgbackrest -- pgbackrest info) + +# Grab the `status` line from `pgbackrest info`, remove whitespace with `xargs`, +# and trim the string to only include the status in order to +# validate the status matches the expected status. +STATUS=$(grep "status" <<< "$INFO" | xargs | cut -d' ' -f 2) +if [[ "$STATUS" != "$EXPECTED_STATUS" ]]; then + echo "Expected ${EXPECTED_STATUS} but got ${STATUS}" + exit 1 +fi + +# Count the lines with `full backup` to validate that the expected number of backups are found. +NUM_BACKUPS=$(grep -c "full backup:" <<< "$INFO") +if [[ "$NUM_BACKUPS" != "$EXPECTED_NUM_BACKUPS" ]]; then + echo "Expected ${EXPECTED_NUM_BACKUPS} but got ${NUM_BACKUPS}" + exit 1 +fi diff --git a/testing/policies/kyverno/kustomization.yaml b/testing/policies/kyverno/kustomization.yaml new file mode 100644 index 0000000000..88e9775e79 --- /dev/null +++ b/testing/policies/kyverno/kustomization.yaml @@ -0,0 +1,37 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +bases: + - https://github.com/kyverno/policies/pod-security/restricted + +resources: + # CVE-2020-14386: https://cloud.google.com/anthos/clusters/docs/security-bulletins#gcp-2020-012 + # CVE-2021-22555: https://cloud.google.com/anthos/clusters/docs/security-bulletins#gcp-2021-015 + - https://raw.githubusercontent.com/kyverno/policies/main/best-practices/require-drop-all/require-drop-all.yaml + - https://raw.githubusercontent.com/kyverno/policies/main/best-practices/require-ro-rootfs/require-ro-rootfs.yaml + + # CVE-2020-8554: https://cloud.google.com/anthos/clusters/docs/security-bulletins#gcp-2020-015 + - https://raw.githubusercontent.com/kyverno/policies/main/best-practices/restrict-service-external-ips/restrict-service-external-ips.yaml + +patches: +- target: + group: kyverno.io + kind: ClusterPolicy + patch: |- + # Ensure all policies "audit" rather than "enforce". + - { op: replace, path: /spec/validationFailureAction, value: audit } + +# Issue: [sc-11286] +# OpenShift 4.10 forbids any/all seccomp profiles. Remove the policy for now. +# - https://github.com/openshift/cluster-kube-apiserver-operator/issues/1325 +# - https://github.com/kyverno/policies/tree/main/pod-security/restricted/restrict-seccomp-strict +- target: + group: kyverno.io + kind: ClusterPolicy + name: restrict-seccomp-strict + patch: |- + $patch: delete + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: restrict-seccomp-strict diff --git a/testing/policies/kyverno/service_links.yaml b/testing/policies/kyverno/service_links.yaml new file mode 100644 index 0000000000..0ae48796ed --- /dev/null +++ b/testing/policies/kyverno/service_links.yaml @@ -0,0 +1,43 @@ +# Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: disable-service-links + annotations: + policies.kyverno.io/title: Disable Injection of Service Environment Variables + policies.kyverno.io/category: PGO + policies.kyverno.io/severity: high + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Kubernetes automatically adds environment variables describing every Service in a Pod's namespace. + This can inadvertently change the behavior of things that read from the environment. For example, + a PodSpec that worked in the past might start to fail when the Pod is recreated with new Services + around. + +spec: + validationFailureAction: audit + background: true + rules: + - name: validate-enableServiceLinks + match: + resources: + kinds: + - Pod + validate: + message: Do not inject Service environment variables. + pattern: + spec: + enableServiceLinks: false diff --git a/trivy.yaml b/trivy.yaml new file mode 100644 index 0000000000..b2ef32d785 --- /dev/null +++ b/trivy.yaml @@ -0,0 +1,14 @@ +# https://aquasecurity.github.io/trivy/latest/docs/references/configuration/config-file/ +--- +# Specify an exact list of recognized and acceptable licenses. +# [A GitHub workflow](/.github/workflows/trivy.yaml) rejects pull requests that +# import licenses not in this list. +# +# https://aquasecurity.github.io/trivy/latest/docs/scanner/license/ +license: + ignored: + - Apache-2.0 + - BSD-2-Clause + - BSD-3-Clause + - ISC + - MIT
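A short usage sketch for the KUTTL suite configuration added above (assumptions for illustration: the KUTTL kubectl plugin is installed, a Kubernetes cluster with PGO deployed is available, and the e2e tests have been rendered into testing/kuttl/e2e-generated/, the directory referenced by testDirs in kuttl-test.yaml):

    # Run the full end-to-end suite using the config added in this patch.
    kubectl kuttl test --config testing/kuttl/kuttl-test.yaml

    # While debugging a single test, keep its resources around for inspection.
    kubectl kuttl test --config testing/kuttl/kuttl-test.yaml --test standalone-pgadmin --skip-delete

The --skip-delete flag mirrors the commented-out skipDelete option in kuttl-test.yaml and is useful for inspecting the pgAdmin Pod, Secret, and ConfigMap contents that the asserts above exec into.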