diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..639a059edc --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,16 @@ +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/customizing-dependency-updates +# +# See: https://www.github.com/dependabot/dependabot-core/issues/4605 +--- +# yaml-language-server: $schema=https://json.schemastore.org/dependabot-2.0.json +version: 2 +updates: + - package-ecosystem: github-actions + directory: / + schedule: + interval: weekly + day: tuesday + groups: + all-github-actions: + patterns: ['*'] diff --git a/.github/workflows/codeql-analysis.yaml b/.github/workflows/codeql-analysis.yaml index 5ee0fd846a..ae4d24d122 100644 --- a/.github/workflows/codeql-analysis.yaml +++ b/.github/workflows/codeql-analysis.yaml @@ -1,15 +1,20 @@ name: CodeQL on: + pull_request: push: branches: - - master + - main schedule: - cron: '10 18 * * 2' +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + GOTOOLCHAIN: local + jobs: analyze: - name: Analyze runs-on: ubuntu-latest permissions: actions: read @@ -19,17 +24,17 @@ jobs: if: ${{ github.repository == 'CrunchyData/postgres-operator' }} steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: { go-version: 1.x } + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: { languages: go } - name: Autobuild # This action calls `make` which runs our "help" target. - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index c9c578fc2c..c715f2a1d7 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -2,55 +2,24 @@ name: Linters on: pull_request: - branches: - - master -jobs: - documentation: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - # Some versions of Ubuntu have an awk that does not recognize POSIX classes. - # Log the version of awk and abort when it cannot match space U+0020. - # - https://bugs.launchpad.net/ubuntu/+source/mawk/+bug/69724 - - run: awk -W version && awk '{ exit 1 != match($0, /[[:space:]]/) }' <<< ' ' - - run: | - find docs/content -not -type d -not -name crd.md -print0 | xargs -0 awk ' - BEGIN { print "::add-matcher::.github/actions/awk-matcher.json" } - - /[[:space:]]$/ { errors++; print FILENAME ":" FNR " error: Trailing space" } - /TODO/ { errors++; print FILENAME ":" FNR " error: Found TODO. Try running hack/create-todo-patch.sh" } - - END { print "::remove-matcher owner=awk::" } - END { exit errors != 0 } - ' - - documentation-crd: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - # The `documentation-crd` job only checks the crd.md for `TODO`, - # as some of the upstream documentation has trailing spaces - - run: | - find docs/content -name crd.md -print0 | xargs -0 awk ' - BEGIN { print "::add-matcher::.github/actions/awk-matcher.json" } - - /TODO/ { errors++; print FILENAME ":" FNR " error: Found TODO. 
Try running hack/create-todo-patch.sh" } - - END { print "::remove-matcher owner=awk::" } - END { exit errors != 0 } - ' +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + GOTOOLCHAIN: local +jobs: golangci-lint: runs-on: ubuntu-latest + permissions: + contents: read + checks: write steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: { go-version: 1.x } + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } - - uses: golangci/golangci-lint-action@v3 + - uses: golangci/golangci-lint-action@v6 with: version: latest args: --timeout=5m diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 45adcfb385..e8174e4f95 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -2,20 +2,22 @@ name: Tests on: pull_request: - branches: - - master push: branches: - - master + - main + +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + GOTOOLCHAIN: local jobs: go-test: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: - go-version: 1.x + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } - run: make check - run: make check-generate @@ -23,16 +25,16 @@ jobs: run: go mod tidy && git diff --exit-code -- go.mod kubernetes-api: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest needs: [go-test] strategy: fail-fast: false matrix: kubernetes: ['default'] steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: { go-version: 1.x } + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } - run: go mod download - run: ENVTEST_K8S_VERSION="${KUBERNETES#default}" make check-envtest env: @@ -41,33 +43,33 @@ jobs: # Upload coverage to GitHub - run: gzip envtest.coverage - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: - name: "kubernetes-api=${{ matrix.kubernetes }}" + name: "~coverage~kubernetes-api=${{ matrix.kubernetes }}" path: envtest.coverage.gz retention-days: 1 kubernetes-k3d: if: "${{ github.repository == 'CrunchyData/postgres-operator' }}" - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest needs: [go-test] strategy: fail-fast: false matrix: - kubernetes: [v1.28, v1.25] + kubernetes: [v1.31, v1.28] steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: { go-version: 1.x } + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } - name: Start k3s uses: ./.github/actions/k3d with: k3s-channel: "${{ matrix.kubernetes }}" prefetch-images: | - registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.47-0 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.19-4 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.9-0 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.53.1-0 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.23-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2 - run: make createnamespaces check-envtest-existing env: @@ -76,38 +78,39 @@ jobs: # Upload coverage to GitHub - run: gzip envtest-existing.coverage - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: - name: "kubernetes-k3d=${{ matrix.kubernetes }}" + name: "~coverage~kubernetes-k3d=${{ matrix.kubernetes }}" path: 
envtest-existing.coverage.gz retention-days: 1 kuttl-k3d: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest needs: [go-test] strategy: fail-fast: false matrix: - kubernetes: [v1.28, v1.27, v1.26, v1.25] + kubernetes: [v1.31, v1.30, v1.29, v1.28] steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: { go-version: 1.x } + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } - name: Start k3s uses: ./.github/actions/k3d with: k3s-channel: "${{ matrix.kubernetes }}" prefetch-images: | - registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-17 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.47-0 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.19-4 + registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-31 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.53.1-0 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.23-0 registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.9-0 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-14.9-3.1-0 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.4-0 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-15.4-3.3-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.3-2 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.4-2 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-17.0-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-17.0-3.4-0 - run: go mod download - name: Build executable run: PGO_VERSION='${{ github.sha }}' make build-postgres-operator @@ -116,7 +119,7 @@ jobs: run: make get-pgmonitor env: PGMONITOR_DIR: "${{ github.workspace }}/hack/tools/pgmonitor" - QUERIES_CONFIG_DIR: "${{ github.workspace }}/hack/tools/queries" + QUERIES_CONFIG_DIR: "${{ github.workspace }}/hack/tools/queries" # Start a Docker container with the working directory mounted. 
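The step below runs the operator itself in a container: read-only root filesystem, host networking, and the checkout mounted at /mnt, so the binary built above serves the k3d cluster while kuttl drives tests against it. While a suite runs, the operator's output can be followed from the runner; a small sketch using only the container name the step defines:

```sh
# Follow the operator's log output while the kuttl suites execute.
docker logs --follow postgres-operator
```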
- name: Start PGO @@ -126,21 +129,23 @@ jobs: hack/create-kubeconfig.sh postgres-operator pgo docker run --detach --network host --read-only \ --volume "$(pwd):/mnt" --workdir '/mnt' --env 'PATH=/mnt/bin' \ + --env 'CHECK_FOR_UPGRADES=false' \ --env 'QUERIES_CONFIG_DIR=/mnt/hack/tools/queries' \ --env 'KUBECONFIG=hack/.kube/postgres-operator/pgo' \ - --env 'RELATED_IMAGE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-17' \ - --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.47-0' \ - --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.19-4' \ + --env 'RELATED_IMAGE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-31' \ + --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.53.1-0' \ + --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.23-0' \ --env 'RELATED_IMAGE_PGEXPORTER=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest' \ --env 'RELATED_IMAGE_PGUPGRADE=registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest' \ - --env 'RELATED_IMAGE_POSTGRES_14=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.9-0' \ - --env 'RELATED_IMAGE_POSTGRES_14_GIS_3.1=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-14.9-3.1-0' \ - --env 'RELATED_IMAGE_POSTGRES_15=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.4-0' \ - --env 'RELATED_IMAGE_POSTGRES_15_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-15.4-3.3-0' \ + --env 'RELATED_IMAGE_POSTGRES_16=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2' \ + --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.3-2' \ + --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.4-2' \ + --env 'RELATED_IMAGE_POSTGRES_17=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-17.0-0' \ + --env 'RELATED_IMAGE_POSTGRES_17_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-17.0-3.4-0' \ + --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-8.12-0' \ --env 'PGO_FEATURE_GATES=TablespaceVolumes=true' \ --name 'postgres-operator' ubuntu \ postgres-operator - - name: Install kuttl run: | curl -Lo /usr/local/bin/kubectl-kuttl https://github.com/kudobuilder/kuttl/releases/download/v0.13.0/kubectl-kuttl_0.13.0_linux_x86_64 @@ -148,11 +153,11 @@ jobs: - run: make generate-kuttl env: - KUTTL_PG_UPGRADE_FROM_VERSION: '14' - KUTTL_PG_UPGRADE_TO_VERSION: '15' - KUTTL_PG_VERSION: '14' - KUTTL_POSTGIS_VERSION: '3.1' - KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.9-0' + KUTTL_PG_UPGRADE_FROM_VERSION: '16' + KUTTL_PG_UPGRADE_TO_VERSION: '17' + KUTTL_PG_VERSION: '16' + KUTTL_POSTGIS_VERSION: '3.4' + KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2' - run: | make check-kuttl && exit failed=$? 
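The `make check-kuttl && exit` line above is a capture-then-report idiom: if the tests pass, the step exits immediately; otherwise `failed=$?` records the exit code so diagnostic commands can run before the job finally fails. Schematically, with the diagnostics hedged as an assumption since they fall outside this hunk:

```sh
# On success, stop here; on failure, remember the exit code for later.
make check-kuttl && exit
failed=$?
# Assumed diagnostics: dump cluster state before surfacing the failure.
kubectl get pods --all-namespaces
exit $failed
```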
@@ -170,10 +175,10 @@ jobs: - kubernetes-api - kubernetes-k3d steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: { go-version: 1.x } - - uses: actions/download-artifact@v3 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: { go-version: stable } + - uses: actions/download-artifact@v4 with: { path: download } # Combine the coverage profiles by taking the mode line from any one file @@ -197,8 +202,8 @@ jobs: # Upload coverage to GitHub - run: gzip total-coverage.html - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: - name: coverage-report + name: coverage-report=html path: total-coverage.html.gz retention-days: 15 diff --git a/.github/workflows/trivy-pr-scan.yaml b/.github/workflows/trivy.yaml similarity index 57% rename from .github/workflows/trivy-pr-scan.yaml rename to .github/workflows/trivy.yaml index 183082e3f4..2a16e4929c 100644 --- a/.github/workflows/trivy-pr-scan.yaml +++ b/.github/workflows/trivy.yaml @@ -1,17 +1,38 @@ -# Uses Trivy to scan every pull request, rejecting those with severe, fixable vulnerabilities. -# Scans on PR to master and weekly with same behavior. name: Trivy on: pull_request: - branches: - - master push: branches: - - master + - main + +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + GOTOOLCHAIN: local jobs: - scan: + licenses: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + # Trivy needs a populated Go module cache to detect Go module licenses. + - uses: actions/setup-go@v5 + with: { go-version: stable } + - run: go mod download + + # Report success only when detected licenses are listed in [/trivy.yaml]. + - name: Scan licenses + uses: aquasecurity/trivy-action@0.28.0 + env: + TRIVY_DEBUG: true + with: + scan-type: filesystem + scanners: license + exit-code: 1 + + vulnerabilities: if: ${{ github.repository == 'CrunchyData/postgres-operator' }} permissions: @@ -21,32 +42,34 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 # Run trivy and log detected and fixed vulnerabilities # This report should match the uploaded code scan report below # and is a convenience/redundant effort for those who prefer to # read logs and/or if anything goes wrong with the upload. - name: Log all detected vulnerabilities - uses: aquasecurity/trivy-action@master + uses: aquasecurity/trivy-action@0.28.0 with: - scan-type: fs + scan-type: filesystem hide-progress: true ignore-unfixed: true - + scanners: secret,vuln + # Upload actionable results to the GitHub Security tab. # Pull request checks fail according to repository settings. 
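Both scan steps now pin `trivy-action` to `0.28.0` instead of the floating `master` tag, which keeps runs reproducible. For reference, a local equivalent of this file's three scans, using only flags the Trivy CLI provides (the license scan needs `go mod download` first, as the job comment notes):

```sh
# Licenses: Trivy inspects the populated Go module cache.
go mod download
trivy filesystem --scanners license --exit-code 1 .

# Vulnerabilities and secrets, logged and then rendered as SARIF.
trivy filesystem --scanners secret,vuln --ignore-unfixed .
trivy filesystem --scanners secret,vuln --ignore-unfixed \
  --format sarif --output trivy-results.sarif .
```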
# - https://docs.github.com/en/code-security/code-scanning/integrating-with-code-scanning/uploading-a-sarif-file-to-github # - https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning - name: Report actionable vulnerabilities - uses: aquasecurity/trivy-action@master + uses: aquasecurity/trivy-action@0.28.0 with: - scan-type: fs + scan-type: filesystem ignore-unfixed: true format: 'sarif' output: 'trivy-results.sarif' + scanners: secret,vuln - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v2 + uses: github/codeql-action/upload-sarif@v3 with: sarif_file: 'trivy-results.sarif' diff --git a/.gitignore b/.gitignore index 2fa6186778..dcfd7074a3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ .DS_Store /vendor/ /testing/kuttl/e2e-generated*/ +gke_gcloud_auth_plugin_cache diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index b8907ec067..0000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "hugo/themes/crunchy-hugo-theme"] - path = docs/themes/crunchy-hugo-theme - url = https://github.com/crunchydata/crunchy-hugo-theme diff --git a/.golangci.next.yaml b/.golangci.next.yaml index 8973702226..95b3f63347 100644 --- a/.golangci.next.yaml +++ b/.golangci.next.yaml @@ -9,11 +9,11 @@ linters: disable-all: true enable: - contextcheck + - err113 - errchkjson - gocritic - godot - godox - - goerr113 - gofumpt - gosec # exclude-use-default - nilnil @@ -38,7 +38,3 @@ linters-settings: # https://github.com/kulti/thelper/issues/27 tb: { begin: true, first: true } test: { begin: true, first: true, name: true } - -run: - build-tags: - - envtest diff --git a/.golangci.yaml b/.golangci.yaml index fb1ee2ceaf..87a6ed0464 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -6,9 +6,9 @@ linters: - errchkjson - gci - gofumpt - - scopelint enable: - depguard + - goheader - gomodguard - gosimple - importas @@ -44,6 +44,15 @@ linters-settings: exhaustive: default-signifies-exhaustive: true + goheader: + template: |- + Copyright {{ DATES }} Crunchy Data Solutions, Inc. + + SPDX-License-Identifier: Apache-2.0 + values: + regexp: + DATES: '((201[7-9]|202[0-3]) - 2024|2024)' + goimports: local-prefixes: github.com/crunchydata/postgres-operator @@ -58,6 +67,11 @@ linters-settings: k8s.io/kubernetes is for managing dependencies of the Kubernetes project, i.e. building kubelet and kubeadm. + gosec: + excludes: + # Flags for potentially-unsafe casting of ints, similar problem to globally-disabled G103 + - G115 + importas: alias: - pkg: k8s.io/api/(\w+)/(v[\w\w]+) @@ -68,8 +82,6 @@ linters-settings: alias: apierrors no-unaliased: true -run: - build-tags: - - envtest - skip-dirs: +issues: + exclude-dirs: - pkg/generated diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2e69d17f63..e209f4e5a7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,15 +13,11 @@ Thanks! We look forward to your contribution. # General Contributing Guidelines All ongoing development for an upcoming release gets committed to the -**`master`** branch. The `master` branch technically serves as the "development" -branch as well, but all code that is committed to the `master` branch should be +**`main`** branch. The `main` branch technically serves as the "development" +branch as well, but all code that is committed to the `main` branch should be considered _stable_, even if it is part of an ongoing release cycle. 
-All fixes for a supported release should be committed to the supported release -branch. For example, the 4.3 release is maintained on the `REL_4_3` branch. -Please see the section on _Supported Releases_ for more information. - -Ensure any changes are clear and well-documented. When we say "well-documented": +Ensure any changes are clear and well-documented: - If the changes include code, ensure all additional code has corresponding documentation in and around it. This includes documenting the definition of @@ -32,10 +28,7 @@ summarize how. Avoid simply repeating details from declarations,. When in doubt, favor overexplaining to underexplaining. - Code comments should be consistent with their language conventions. For -example, please use GoDoc conventions for Go source code. - -- Any new features must have corresponding user documentation. Any removed -features must have their user documentation removed from the documents. +example, please use `gofmt` [conventions](https://go.dev/doc/comment) for Go source code. - Do not submit commented-out code. If the code does not need to be used anymore, please remove it. @@ -62,12 +55,7 @@ All commits must either be rebased in atomic order or squashed (if the squashed commit is considered atomic). Merge commits are not accepted. All conflicts must be resolved prior to pushing changes. -**All pull requests should be made from the `master` branch** unless it is a fix -for a specific supported release. - -Once a major or minor release is made, no new features are added into the -release branch, only bug fixes. Any new features are added to the `master` -branch until the time that said new features are released. +**All pull requests should be made from the `main` branch.** # Commit Messages @@ -86,12 +74,11 @@ possible as to what the changes are. Good things to include: understand. ``` -If you wish to tag a Github issue or another project management tracker, please +If you wish to tag a GitHub issue or another project management tracker, please do so at the bottom of the commit message, and make it clearly labeled like so: ``` -Issue: #123 -Issue: [sc-1234] +Issue: CrunchyData/postgres-operator#123 ``` # Submitting Pull Requests @@ -100,102 +87,23 @@ All work should be made in your own repository fork. When you believe your work is ready to be committed, please follow the guidance below for creating a pull request. -## Upcoming Releases / Features - -Ongoing work for new features should occur in branches off of the `master` -branch. It is suggested, but not required, that the branch name should reflect -that this is for an upcoming release, i.e. `upstream/branch-name` where the -`branch-name` is something descriptive for what you're working on. - -## Supported Releases / Fixes - -While not required, it is recommended to make your branch name along the lines -of: `REL_X_Y/branch-name` where the `branch-name` is something descriptive -for what you're working on. - -# Releases & Versioning - -Overall, release tags attempt to follow the -[semantic versioning](https://semver.org) scheme. - -"Supported releases" (described in the next section) occur on "minor" release -branches (e.g. the `x.y` portion of the `x.y.z`). - -One or more "patch" releases can occur after a minor release. A patch release is -used to fix bugs and other issues that may be found after a supported release. - -Fixes found on the `master` branch can be backported to a support release -branch. 
Any fixes for a supported release must have a pull request off of the -supported release branch, which is detailed below. - -## Supported Releases +## Upcoming Features -When a "minor" release is made, the release is stamped using the `vx.y.0` format -as denoted above, and a branch is created with the name `REL_X_Y`. Once a -minor release occurs, no new features are added to the `REL_X_Y` branch. -However, bug fixes can (and if found, should) be added to this branch. +Ongoing work for new features should occur in branches off of the `main` +branch. -To contribute a bug fix to a supported release, please make a pull request off -of the supported release branch. For instance, if you find a bug in the 4.3 -release, then you would make a pull request off of the `REL_4_3` branch. +## Unsupported Branches -## Unsupported Releases - -When a release is no longer supported, the branch will be renamed following the +When a release branch is no longer supported, it will be renamed following the pattern `REL_X_Y_FINAL` with the key suffix being _FINAL_. For example, `REL_3_2_FINAL` indicates that the 3.2 release is no longer supported. Nothing should ever be pushed to a `REL_X_Y_FINAL` branch once `FINAL` is on the branch name. -## Alpha, Beta, Release Candidate Releases - -At any point in the release cycle for a new release, there could exist one or -more alpha, beta, or release candidate (RC) release. Alpha, beta, and release -candidates **should not be used in production environments**. - -Alpha is the early stage of a release cycle and is typically made to test the -mechanics of an upcoming release. These should be considered relatively -unstable. The format for an alpha release tag is `v4.3.0-alpha.1`, which in this -case indicates it is the first alpha release for 4.3. - -Beta occurs during the later stage of a release cycle. At this point, the -release should be considered feature complete and the beta is used to -distribute, test, and collect feedback on the upcoming release. The betas should -be considered unstable, but as mentioned feature complete. The format for an -beta release tag is `v4.3.0-beta.1`, which in this case indicates it is the -first beta release for 4.3. - -Release candidates (RCs) occur just before a release. A release candidate should -be considered stable, and is typically used for a final round of bug checking -and testing. Multiple release candidates can occur in the event of serious bugs. -The format for a release candidate tag is `v4.3.0-rc.1`, which in this -case indicates it is the first release candidate for 4.3. - -**After a major or minor release, no alpha, beta, or release candidate releases -are supported**. In fact, any newer release of an alpha, beta, or RC immediately -deprecates any older alpha, beta or RC. (Naturally, a beta deprecates an alpha, -and a RC deprecates a beta). - -If you are testing on an older alpha, beta or RC, bug reports will not be -accepted. Please ensure you are testing on the latest version. - # Testing -We greatly appreciate any and all testing for the project. When testing, please -be sure you do the following: - -- If testing against a release, ensure your tests are performed against the -latest minor version (the last number in the release denotes the minor version, -e.g. 
the "3" in the 4.3.3) -- If testing against a pre-release (alpha, beta, RC), ensure your tests are -performed against latest version -- If testing against a development (`master`) or release (`REL_X_Y`) branch, -ensure your tests are performed against the latest commit - -Please do not test against unsupported versions (e.g. any release that is marked -final). - +We greatly appreciate any and all testing for the project. There are several ways to help with the testing effort: - Manual testing: testing particular features with a series of manual commands diff --git a/LICENSE.md b/LICENSE.md index 8ce5664373..8d57ad6f2e 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2017 - 2023 Crunchy Data Solutions, Inc. + Copyright 2017 - 2024 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/Makefile b/Makefile index d8f325965b..37aca1a37e 100644 --- a/Makefile +++ b/Makefile @@ -6,25 +6,21 @@ PGO_IMAGE_URL ?= https://www.crunchydata.com/products/crunchy-postgresql-for-kub PGO_IMAGE_PREFIX ?= localhost PGMONITOR_DIR ?= hack/tools/pgmonitor -PGMONITOR_VERSION ?= v4.10.0 +PGMONITOR_VERSION ?= v5.1.1 QUERIES_CONFIG_DIR ?= hack/tools/queries +EXTERNAL_SNAPSHOTTER_DIR ?= hack/tools/external-snapshotter +EXTERNAL_SNAPSHOTTER_VERSION ?= v8.0.1 + # Buildah's "build" used to be "bud". Use the alias to be compatible for a while. BUILDAH_BUILD ?= buildah bud -DEBUG_BUILD ?= false GO ?= go -GO_BUILD = $(GO_CMD) build -trimpath -GO_CMD = $(GO_ENV) $(GO) +GO_BUILD = $(GO) build GO_TEST ?= $(GO) test KUTTL ?= kubectl-kuttl KUTTL_TEST ?= $(KUTTL) test -# Disable optimizations if creating a debug build -ifeq ("$(DEBUG_BUILD)", "true") - GO_BUILD = $(GO_CMD) build -gcflags='all=-N -l' -endif - ##@ General # The help target prints out all targets with their descriptions organized @@ -59,18 +55,23 @@ get-pgmonitor: cp -r '$(PGMONITOR_DIR)/postgres_exporter/common/.' '${QUERIES_CONFIG_DIR}' cp '$(PGMONITOR_DIR)/postgres_exporter/linux/queries_backrest.yml' '${QUERIES_CONFIG_DIR}' +.PHONY: get-external-snapshotter +get-external-snapshotter: + git -C '$(dir $(EXTERNAL_SNAPSHOTTER_DIR))' clone https://github.com/kubernetes-csi/external-snapshotter.git || git -C '$(EXTERNAL_SNAPSHOTTER_DIR)' fetch origin + @git -C '$(EXTERNAL_SNAPSHOTTER_DIR)' checkout '$(EXTERNAL_SNAPSHOTTER_VERSION)' + @git -C '$(EXTERNAL_SNAPSHOTTER_DIR)' config pull.ff only + .PHONY: clean clean: ## Clean resources clean: clean-deprecated rm -f bin/postgres-operator - rm -f config/rbac/role.yaml + rm -rf licenses/*/ [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated [ ! -d testing/kuttl/e2e-generated-other ] || rm -r testing/kuttl/e2e-generated-other - rm -rf build/crd/generated build/crd/*/generated - [ ! -f hack/tools/setup-envtest ] || hack/tools/setup-envtest --bin-dir=hack/tools/envtest cleanup [ ! -f hack/tools/setup-envtest ] || rm hack/tools/setup-envtest - [ ! -d hack/tools/envtest ] || rm -r hack/tools/envtest + [ ! -d hack/tools/envtest ] || { chmod -R u+w hack/tools/envtest && rm -r hack/tools/envtest; } [ ! -d hack/tools/pgmonitor ] || rm -rf hack/tools/pgmonitor + [ ! -d hack/tools/external-snapshotter ] || rm -rf hack/tools/external-snapshotter [ ! -n "$$(ls hack/tools)" ] || rm -r hack/tools/* [ ! 
-d hack/.kube ] || rm -r hack/.kube @@ -91,6 +92,8 @@ clean-deprecated: ## Clean deprecated resources @# crunchy-postgres-exporter used to live in this repo [ ! -d bin/crunchy-postgres-exporter ] || rm -r bin/crunchy-postgres-exporter [ ! -d build/crunchy-postgres-exporter ] || rm -r build/crunchy-postgres-exporter + @# CRDs used to require patching + [ ! -d build/crd ] || rm -r build/crd ##@ Deployment @@ -120,7 +123,7 @@ undeploy: ## Undeploy the PostgreSQL Operator .PHONY: deploy-dev deploy-dev: ## Deploy the PostgreSQL Operator locally -deploy-dev: PGO_FEATURE_GATES ?= "TablespaceVolumes=true" +deploy-dev: PGO_FEATURE_GATES ?= "TablespaceVolumes=true,VolumeSnapshots=true" deploy-dev: get-pgmonitor deploy-dev: build-postgres-operator deploy-dev: createnamespaces @@ -133,6 +136,9 @@ deploy-dev: createnamespaces CHECK_FOR_UPGRADES='$(if $(CHECK_FOR_UPGRADES),$(CHECK_FOR_UPGRADES),false)' \ KUBECONFIG=hack/.kube/postgres-operator/pgo \ PGO_NAMESPACE='postgres-operator' \ + PGO_INSTALLER='deploy-dev' \ + PGO_INSTALLER_ORIGIN='postgres-operator-repo' \ + BUILD_SOURCE='build-postgres-operator' \ $(shell kubectl kustomize ./config/dev | \ sed -ne '/^kind: Deployment/,/^---/ { \ /RELATED_IMAGE_/ { N; s,.*\(RELATED_[^[:space:]]*\).*value:[[:space:]]*\([^[:space:]]*\),\1="\2",; p; }; \ @@ -143,8 +149,9 @@ deploy-dev: createnamespaces ##@ Build - Binary .PHONY: build-postgres-operator build-postgres-operator: ## Build the postgres-operator binary - $(GO_BUILD) -ldflags '-X "main.versionString=$(PGO_VERSION)"' \ - -o bin/postgres-operator ./cmd/postgres-operator + CGO_ENABLED=1 $(GO_BUILD) $(\ + ) --ldflags '-X "main.versionString=$(PGO_VERSION)"' $(\ + ) --trimpath -o bin/postgres-operator ./cmd/postgres-operator ##@ Build - Images .PHONY: build-postgres-operator-image @@ -187,19 +194,19 @@ build-postgres-operator-image: build/postgres-operator/Dockerfile ##@ Test .PHONY: check check: ## Run basic go tests with coverage output - $(GO_TEST) -cover ./... +check: get-pgmonitor + QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" $(GO_TEST) -cover ./... # Available versions: curl -s 'https://storage.googleapis.com/kubebuilder-tools/' | grep -o '[^<]*' # - KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT=true .PHONY: check-envtest check-envtest: ## Run check using envtest and a mock kube api -check-envtest: ENVTEST_USE = hack/tools/setup-envtest --bin-dir=$(CURDIR)/hack/tools/envtest use $(ENVTEST_K8S_VERSION) +check-envtest: ENVTEST_USE = $(ENVTEST) --bin-dir=$(CURDIR)/hack/tools/envtest use $(ENVTEST_K8S_VERSION) check-envtest: SHELL = bash -check-envtest: get-pgmonitor - GOBIN='$(CURDIR)/hack/tools' $(GO) install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest +check-envtest: get-pgmonitor tools/setup-envtest get-external-snapshotter @$(ENVTEST_USE) --print=overview && echo source <($(ENVTEST_USE) --print=env) && PGO_NAMESPACE="postgres-operator" QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" \ - $(GO_TEST) -count=1 -cover -tags=envtest ./... + $(GO_TEST) -count=1 -cover ./... # The "PGO_TEST_TIMEOUT_SCALE" environment variable (default: 1) can be set to a # positive number that extends test timeouts. 
The following runs tests with @@ -207,11 +214,11 @@ check-envtest: get-pgmonitor # make check-envtest-existing PGO_TEST_TIMEOUT_SCALE=1.2 .PHONY: check-envtest-existing check-envtest-existing: ## Run check using envtest and an existing kube api -check-envtest-existing: get-pgmonitor +check-envtest-existing: get-pgmonitor get-external-snapshotter check-envtest-existing: createnamespaces kubectl apply --server-side -k ./config/dev USE_EXISTING_CLUSTER=true PGO_NAMESPACE="postgres-operator" QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" \ - $(GO_TEST) -count=1 -cover -p=1 -tags=envtest ./... + $(GO_TEST) -count=1 -cover -p=1 ./... kubectl delete -k ./config/dev # Expects operator to be running @@ -222,11 +229,11 @@ check-kuttl: ## example command: make check-kuttl KUTTL_TEST=' --config testing/kuttl/kuttl-test.yaml .PHONY: generate-kuttl -generate-kuttl: export KUTTL_PG_UPGRADE_FROM_VERSION ?= 14 -generate-kuttl: export KUTTL_PG_UPGRADE_TO_VERSION ?= 15 -generate-kuttl: export KUTTL_PG_VERSION ?= 15 -generate-kuttl: export KUTTL_POSTGIS_VERSION ?= 3.3 -generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.4-0 +generate-kuttl: export KUTTL_PG_UPGRADE_FROM_VERSION ?= 15 +generate-kuttl: export KUTTL_PG_UPGRADE_TO_VERSION ?= 16 +generate-kuttl: export KUTTL_PG_VERSION ?= 16 +generate-kuttl: export KUTTL_POSTGIS_VERSION ?= 3.4 +generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-1 generate-kuttl: export KUTTL_TEST_DELETE_NAMESPACE ?= kuttl-test-delete-namespace generate-kuttl: ## Generate kuttl tests [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated @@ -238,7 +245,6 @@ generate-kuttl: ## Generate kuttl tests 14 ) export KUTTL_BITNAMI_IMAGE_TAG=14.5.0-debian-11-r37 ;; \ 13 ) export KUTTL_BITNAMI_IMAGE_TAG=13.8.0-debian-11-r39 ;; \ 12 ) export KUTTL_BITNAMI_IMAGE_TAG=12.12.0-debian-11-r40 ;; \ - 11 ) export KUTTL_BITNAMI_IMAGE_TAG=11.17.0-debian-11-r39 ;; \ esac; \ render() { envsubst '"'"' \ $$KUTTL_PG_UPGRADE_FROM_VERSION $$KUTTL_PG_UPGRADE_TO_VERSION \ @@ -253,7 +259,7 @@ generate-kuttl: ## Generate kuttl tests ##@ Generate .PHONY: check-generate -check-generate: ## Check crd, crd-docs, deepcopy functions, and rbac generation +check-generate: ## Check crd, deepcopy functions, and rbac generation check-generate: generate-crd check-generate: generate-deepcopy check-generate: generate-rbac @@ -262,51 +268,53 @@ check-generate: generate-rbac git diff --exit-code -- pkg/apis .PHONY: generate -generate: ## Generate crd, crd-docs, deepcopy functions, and rbac +generate: ## Generate crd, deepcopy functions, and rbac generate: generate-crd -generate: generate-crd-docs generate: generate-deepcopy generate: generate-rbac .PHONY: generate-crd -generate-crd: ## Generate crd - GOBIN='$(CURDIR)/hack/tools' ./hack/controller-generator.sh \ - crd:crdVersions='v1' \ - paths='./pkg/apis/...' \ - output:dir='build/crd/postgresclusters/generated' # build/crd/{plural}/generated/{group}_{plural}.yaml - @ - GOBIN='$(CURDIR)/hack/tools' ./hack/controller-generator.sh \ - crd:crdVersions='v1' \ - paths='./pkg/apis/...' \ - output:dir='build/crd/pgupgrades/generated' # build/crd/{plural}/generated/{group}_{plural}.yaml - @ - GOBIN='$(CURDIR)/hack/tools' ./hack/controller-generator.sh \ +generate-crd: ## Generate Custom Resource Definitions (CRDs) +generate-crd: tools/controller-gen + $(CONTROLLER) \ crd:crdVersions='v1' \ paths='./pkg/apis/...' 
\ - output:dir='build/crd/pgadmins/generated' # build/crd/{plural}/generated/{group}_{plural}.yaml - @ - kubectl kustomize ./build/crd/postgresclusters > ./config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml - kubectl kustomize ./build/crd/pgupgrades > ./config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml - kubectl kustomize ./build/crd/pgadmins > ./config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml - -.PHONY: generate-crd-docs -generate-crd-docs: ## Generate crd-docs - GOBIN='$(CURDIR)/hack/tools' $(GO) install fybrik.io/crdoc@v0.5.2 - ./hack/tools/crdoc \ - --resources ./config/crd/bases \ - --template ./hack/api-template.tmpl \ - --output ./docs/content/references/crd.md + output:dir='config/crd/bases' # {directory}/{group}_{plural}.yaml .PHONY: generate-deepcopy -generate-deepcopy: ## Generate deepcopy functions - GOBIN='$(CURDIR)/hack/tools' ./hack/controller-generator.sh \ +generate-deepcopy: ## Generate DeepCopy functions +generate-deepcopy: tools/controller-gen + $(CONTROLLER) \ object:headerFile='hack/boilerplate.go.txt' \ paths='./pkg/apis/postgres-operator.crunchydata.com/...' .PHONY: generate-rbac -generate-rbac: ## Generate rbac - GOBIN='$(CURDIR)/hack/tools' ./hack/generate-rbac.sh \ - './internal/...' 'config/rbac' +generate-rbac: ## Generate RBAC +generate-rbac: tools/controller-gen + $(CONTROLLER) \ + rbac:roleName='postgres-operator' \ + paths='./cmd/...' paths='./internal/...' \ + output:dir='config/rbac' # {directory}/role.yaml + +##@ Tools + +.PHONY: tools +tools: ## Download tools like controller-gen and kustomize if necessary. + +# go-get-tool will 'go install' any package $2 and install it to $1. +define go-get-tool +@[ -f '$(1)' ] || { echo Downloading '$(2)'; GOBIN='$(abspath $(dir $(1)))' $(GO) install '$(2)'; } +endef + +CONTROLLER ?= hack/tools/controller-gen +tools: tools/controller-gen +tools/controller-gen: + $(call go-get-tool,$(CONTROLLER),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.4) + +ENVTEST ?= hack/tools/setup-envtest +tools: tools/setup-envtest +tools/setup-envtest: + $(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest) ##@ Release diff --git a/README.md b/README.md index 3e33c32f75..357734566e 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@

[README logo header; HTML markup lost in extraction. Heading and image alt text: "PGO: The Postgres Operator from Crunchy Data". The -/+ pair in this hunk updates the logo image markup while keeping that text.]

[![Go Report Card](https://goreportcard.com/badge/github.com/CrunchyData/postgres-operator)](https://goreportcard.com/report/github.com/CrunchyData/postgres-operator) @@ -18,9 +18,13 @@ With conveniences like cloning Postgres clusters to using rolling updates to rol PGO is developed with many years of production experience in automating Postgres management on Kubernetes, providing a seamless cloud native Postgres solution to keep your data always available. +Have questions or looking for help? [Join our Discord group](https://discord.gg/a7vWKG8Ec9). + # Installation -We recommend following our [Quickstart](https://access.crunchydata.com/documentation/postgres-operator/v5/quickstart/) for how to install and get up and running with PGO, the Postgres Operator from Crunchy Data. However, if you can't wait to try it out, here are some instructions to get Postgres up and running on Kubernetes: +Crunchy Data makes PGO available as the orchestration behind Crunchy Postgres for Kubernetes. Crunchy Postgres for Kubernetes is the integrated product that includes PostgreSQL, PGO and a collection of PostgreSQL tools and extensions that includes the various [open source components listed in the documentation](https://access.crunchydata.com/documentation/postgres-operator/latest/references/components). + +We recommend following our [Quickstart](https://access.crunchydata.com/documentation/postgres-operator/v5/quickstart/) for how to install and get up and running. However, if you can't wait to try it out, here are some instructions to get Postgres up and running on Kubernetes: 1. [Fork the Postgres Operator examples repository](https://github.com/CrunchyData/postgres-operator-examples/fork) and clone it to your host machine. For example: @@ -39,6 +43,8 @@ kubectl apply --server-side -k kustomize/install/default For more information please read the [Quickstart](https://access.crunchydata.com/documentation/postgres-operator/v5/quickstart/) and [Tutorial](https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/). +These installation instructions provide the steps necessary to install PGO along with Crunchy Data's Postgres distribution, Crunchy Postgres, as Crunchy Postgres for Kubernetes. In doing so the installation downloads a series of container images from Crunchy Data's Developer Portal. For more information on the use of container images downloaded from the Crunchy Data Developer Portal or other third party sources, please see 'License and Terms' below. The installation and use of PGO outside of the use of Crunchy Postgres for Kubernetes will require modifications of these installation instructions and creation of the necessary PostgreSQL and related containers. + # Cloud Native Postgres for Kubernetes PGO, the Postgres Operator from Crunchy Data, comes with all of the features you need for a complete cloud native Postgres experience on Kubernetes! @@ -179,22 +185,18 @@ In addition to the above, the geospatially enhanced PostgreSQL + PostGIS contain For more information about which versions of the PostgreSQL Operator include which components, please visit the [compatibility](https://access.crunchydata.com/documentation/postgres-operator/v5/references/components/) section of the documentation. 
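For readers following the quickstart above, a hedged post-install smoke test; the label selector is the one PGO's documentation uses and is an assumption here, not something this diff introduces:

```sh
# Confirm the operator pod is running in the default install namespace.
kubectl -n postgres-operator get pods \
  --selector=postgres-operator.crunchydata.com/control-plane=postgres-operator \
  --field-selector=status.phase=Running
```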
-## Supported Platforms +## [Supported Platforms](https://access.crunchydata.com/documentation/postgres-operator/latest/overview/supported-platforms) PGO, the Postgres Operator from Crunchy Data, is tested on the following platforms: -- Kubernetes 1.24-1.27 -- OpenShift 4.10-4.13 +- Kubernetes +- OpenShift - Rancher - Google Kubernetes Engine (GKE), including Anthos - Amazon EKS - Microsoft AKS - VMware Tanzu -This list only includes the platforms that the Postgres Operator is specifically -tested on as part of the release process: PGO works on other Kubernetes -distributions as well. - # Contributing to the Project Want to contribute to the PostgreSQL Operator project? Great! We've put together @@ -208,7 +210,7 @@ Once you are ready to submit a Pull Request, please ensure you do the following: that you have followed the commit message format, added testing where appropriate, documented your changes, etc. 1. Open up a pull request based upon the guidelines. If you are adding a new - feature, please open up the pull request on the `master` branch. + feature, please open up the pull request on the `main` branch. 1. Please be as descriptive in your pull request as possible. If you are referencing an issue, please be sure to include the issue in your pull request @@ -216,7 +218,7 @@ Once you are ready to submit a Pull Request, please ensure you do the following: If you believe you have found a bug or have a detailed feature request, please open a GitHub issue and follow the guidelines for submitting a bug. -For general questions or community support, we welcome you to join our [community Discord](https://discord.gg/a7vWKG8Ec9) or the PGO project [community mailing list](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join) and ask your questions there. +For general questions or community support, we welcome you to join our [community Discord](https://discord.gg/a7vWKG8Ec9) and ask your questions there. For other information, please visit the [Support](https://access.crunchydata.com/documentation/postgres-operator/latest/support/) section of the documentation. @@ -242,4 +244,10 @@ The image rollout can occur over the course of several days. To stay up-to-date on when releases are made available in the [Crunchy Data Developer Portal](https://www.crunchydata.com/developers), please sign up for the [Crunchy Data Developer Program Newsletter](https://www.crunchydata.com/developers#email). You can also [join the PGO project community discord](https://discord.gg/a7vWKG8Ec9) +# FAQs, License and Terms + +For more information regarding PGO, the Postgres Operator project from Crunchy Data, and Crunchy Postgres for Kubernetes, please see the [frequently asked questions](https://access.crunchydata.com/documentation/postgres-operator/latest/faq). + +The installation instructions provided in this repo are designed for the use of PGO along with Crunchy Data's Postgres distribution, Crunchy Postgres, as Crunchy Postgres for Kubernetes. The unmodified use of these installation instructions will result in downloading container images from Crunchy Data repositories - specifically the Crunchy Data Developer Portal. The use of container images downloaded from the Crunchy Data Developer Portal are subject to the [Crunchy Data Developer Program terms](https://www.crunchydata.com/developers/terms-of-use). 
+ The PGO Postgres Operator project source code is available subject to the [Apache 2.0 license](LICENSE.md) with the PGO logo and branding assets covered by [our trademark guidelines](docs/static/logos/TRADEMARKS.md). diff --git a/bin/license_aggregator.sh b/bin/license_aggregator.sh index ee76031472..66f7284a97 100755 --- a/bin/license_aggregator.sh +++ b/bin/license_aggregator.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2021 - 2023 Crunchy Data Solutions, Inc. +# Copyright 2021 - 2024 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/build/crd/.gitignore b/build/crd/.gitignore deleted file mode 100644 index 8a65c2f7ef..0000000000 --- a/build/crd/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -/postgresclusters/generated/ -/pgupgrades/generated/ -/pgadmins/generated/ diff --git a/build/crd/pgadmins/kustomization.yaml b/build/crd/pgadmins/kustomization.yaml deleted file mode 100644 index 78888103ef..0000000000 --- a/build/crd/pgadmins/kustomization.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- generated/postgres-operator.crunchydata.com_pgadmins.yaml - -patches: -# Remove the zero status field included by controller-gen@v0.8.0. These zero -# values conflict with the CRD controller in Kubernetes before v1.22. -# - https://github.com/kubernetes-sigs/controller-tools/pull/630 -# - https://pr.k8s.io/100970 -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: pgadmins.postgres-operator.crunchydata.com - patch: |- - - op: remove - path: /status -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: pgadmins.postgres-operator.crunchydata.com - path: todos.yaml -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: pgadmins.postgres-operator.crunchydata.com -# The version below should match the version on the PostgresCluster CRD - patch: |- - - op: add - path: "/metadata/labels" - value: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest diff --git a/build/crd/pgadmins/todos.yaml b/build/crd/pgadmins/todos.yaml deleted file mode 100644 index 285c688088..0000000000 --- a/build/crd/pgadmins/todos.yaml +++ /dev/null @@ -1,17 +0,0 @@ -- op: add - path: /work - value: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/imagePullSecrets/items/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/config/properties/files/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/config/properties/files/items/properties/secret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/config/properties/ldapBindPassword/properties/name/description -- op: remove - path: /work diff --git a/build/crd/pgupgrades/kustomization.yaml b/build/crd/pgupgrades/kustomization.yaml deleted file mode 100644 index 67bca8fca8..0000000000 --- a/build/crd/pgupgrades/kustomization.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- generated/postgres-operator.crunchydata.com_pgupgrades.yaml - -patches: -# Remove the zero status field included by controller-gen@v0.8.0. These zero -# values conflict with the CRD controller in Kubernetes before v1.22. -# - https://github.com/kubernetes-sigs/controller-tools/pull/630 -# - https://pr.k8s.io/100970 -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: pgupgrades.postgres-operator.crunchydata.com - patch: |- - - op: remove - path: /status -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: pgupgrades.postgres-operator.crunchydata.com - path: todos.yaml -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: pgupgrades.postgres-operator.crunchydata.com -# The version below should match the version on the PostgresCluster CRD - patch: |- - - op: add - path: "/metadata/labels" - value: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest diff --git a/build/crd/pgupgrades/todos.yaml b/build/crd/pgupgrades/todos.yaml deleted file mode 100644 index c0d2202859..0000000000 --- a/build/crd/pgupgrades/todos.yaml +++ /dev/null @@ -1,8 +0,0 @@ -- op: add - path: /work - value: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/imagePullSecrets/items/properties/name/description -- op: remove - path: /work diff --git a/build/crd/postgresclusters/condition.yaml b/build/crd/postgresclusters/condition.yaml deleted file mode 100644 index 577787b520..0000000000 --- a/build/crd/postgresclusters/condition.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# PostgresCluster "v1beta1" is in "/spec/versions/0" - -- op: add - path: /spec/versions/0/schema/openAPIV3Schema/properties/status/properties/conditions/items/description - value: Condition contains details for one aspect of the current state of this API Resource. -- op: add - path: /spec/versions/0/schema/openAPIV3Schema/properties/status/properties/conditions/items/properties/type/description - value: type of condition in CamelCase. 
-- op: add - path: "/spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/containers/items\ - /properties/securityContext/properties/seccompProfile/properties/type/description" - value: >- - type indicates which kind of seccomp profile will be applied. Valid options are: - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. -- op: add - path: "/spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties\ - /containers/items/properties/securityContext/properties/seccompProfile/properties/type/description" - value: >- - type indicates which kind of seccomp profile will be applied. Valid options are: - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. diff --git a/build/crd/postgresclusters/kustomization.yaml b/build/crd/postgresclusters/kustomization.yaml deleted file mode 100644 index 4e790295c4..0000000000 --- a/build/crd/postgresclusters/kustomization.yaml +++ /dev/null @@ -1,42 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- generated/postgres-operator.crunchydata.com_postgresclusters.yaml - -patchesJson6902: -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: postgresclusters.postgres-operator.crunchydata.com - path: condition.yaml -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: postgresclusters.postgres-operator.crunchydata.com - path: status.yaml -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: postgresclusters.postgres-operator.crunchydata.com - path: todos.yaml -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: postgresclusters.postgres-operator.crunchydata.com - path: validation.yaml -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: postgresclusters.postgres-operator.crunchydata.com - patch: |- - - op: add - path: "/metadata/labels" - value: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest diff --git a/build/crd/postgresclusters/status.yaml b/build/crd/postgresclusters/status.yaml deleted file mode 100644 index eacd47582f..0000000000 --- a/build/crd/postgresclusters/status.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# Remove the zero status field included by controller-gen@v0.8.0. These zero -# values conflict with the CRD controller in Kubernetes before v1.22. -# - https://github.com/kubernetes-sigs/controller-tools/pull/630 -# - https://pr.k8s.io/100970 -- op: remove - path: /status diff --git a/build/crd/postgresclusters/todos.yaml b/build/crd/postgresclusters/todos.yaml deleted file mode 100644 index daa05249a0..0000000000 --- a/build/crd/postgresclusters/todos.yaml +++ /dev/null @@ -1,89 +0,0 @@ -- op: add - path: /work - value: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/backups/properties/pgbackrest/properties/configuration/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/backups/properties/pgbackrest/properties/configuration/items/properties/secret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/backups/properties/pgbackrest/properties/repoHost/properties/sshConfigMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/backups/properties/pgbackrest/properties/repoHost/properties/sshSecret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/config/properties/files/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/config/properties/files/items/properties/secret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/customReplicationTLSSecret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/customTLSSecret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/dataSource/properties/pgbackrest/properties/configuration/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/dataSource/properties/pgbackrest/properties/configuration/items/properties/secret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/imagePullSecrets/items/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/containers/items/properties/env/items/properties/valueFrom/properties/configMapKeyRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/containers/items/properties/env/items/properties/valueFrom/properties/secretKeyRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/containers/items/properties/envFrom/items/properties/configMapRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/containers/items/properties/envFrom/items/properties/secretRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/monitoring/properties/pgmonitor/properties/exporter/properties/configuration/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/monitoring/properties/pgmonitor/properties/exporter/properties/configuration/items/properties/secret/properties/name/description -- op: copy - from: 
/work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/monitoring/properties/pgmonitor/properties/exporter/properties/customTLSSecret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/config/properties/files/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/config/properties/files/items/properties/secret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/containers/items/properties/env/items/properties/valueFrom/properties/configMapKeyRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/containers/items/properties/env/items/properties/valueFrom/properties/secretKeyRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/containers/items/properties/envFrom/items/properties/configMapRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/containers/items/properties/envFrom/items/properties/secretRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/customTLSSecret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/userInterface/properties/pgAdmin/properties/config/properties/files/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/userInterface/properties/pgAdmin/properties/config/properties/files/items/properties/secret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/userInterface/properties/pgAdmin/properties/config/properties/ldapBindPassword/properties/name/description -- op: remove - path: /work diff --git a/build/crd/postgresclusters/validation.yaml b/build/crd/postgresclusters/validation.yaml deleted file mode 100644 index c619c4f11d..0000000000 --- a/build/crd/postgresclusters/validation.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# PostgresCluster "v1beta1" is in "/spec/versions/0" - -# Make a temporary workspace. -- { op: add, path: /work, value: {} } - -# Containers should not run with a root GID. -# - https://kubernetes.io/docs/concepts/security/pod-security-standards/ -- op: add - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/supplementalGroups/items/minimum - value: 1 - -# Supplementary GIDs must fit within int32. -# - https://releases.k8s.io/v1.18.0/pkg/apis/core/validation/validation.go#L3659-L3663 -# - https://releases.k8s.io/v1.22.0/pkg/apis/core/validation/validation.go#L3923-L3927 -- op: add - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/supplementalGroups/items/maximum - value: 2147483647 # math.MaxInt32 - -# Make a copy of a standard PVC properties. 
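Both todos.yaml and validation.yaml (deleted here) lean on the same JSON Patch (RFC 6902) idiom: stage a shared value under a temporary `/work` member, `copy` it into many locations, then `remove` the workspace. A standalone sketch of those semantics follows, using the evanphx/json-patch module purely as an illustration; kustomize's `patchesJson6902` implements the same operations internally, so this library choice is an assumption for demo purposes only.

```go
// Demonstrates the "/work workspace" JSON Patch idiom: add a scratch value,
// fan it out with copy ops, then delete the scratch space.
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch" // illustrative stand-in, not what kustomize uses
)

func main() {
	doc := []byte(`{"spec": {"a": {}, "b": {}}}`)

	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "add",    "path": "/work", "value": "Name of the referent."},
		{"op": "copy",   "from": "/work", "path": "/spec/a/description"},
		{"op": "copy",   "from": "/work", "path": "/spec/b/description"},
		{"op": "remove", "path": "/work"}
	]`))
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"spec":{"a":{"description":"..."},"b":{"description":"..."}}}
}
```

Note that the `op: test` in the deleted validation.yaml doubles as a guard: a JSON Patch fails outright when a `test` op's value does not match, so the patch would have flagged any upstream controller-gen change that started emitting a `required` list for PVC specs.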
-- op: copy - from: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/dataVolumeClaimSpec/properties - path: /work/pvcSpecProperties - -# Start an empty list when a standard PVC has no required fields. -- op: test - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/dataVolumeClaimSpec/required - value: null -- op: add - path: /work/pvcSpecRequired - value: [] - -# PersistentVolumeClaims must have an access mode. -# - https://releases.k8s.io/v1.18.0/pkg/apis/core/validation/validation.go#L1893-L1895 -# - https://releases.k8s.io/v1.22.0/pkg/apis/core/validation/validation.go#L2073-L2075 -- op: add - path: /work/pvcSpecRequired/- - value: accessModes -- op: add - path: /work/pvcSpecProperties/accessModes/minItems - value: 1 - -# PersistentVolumeClaims must have a storage request. -# - https://releases.k8s.io/v1.18.0/pkg/apis/core/validation/validation.go#L1904-L1911 -# - https://releases.k8s.io/v1.22.0/pkg/apis/core/validation/validation.go#L2101-L2108 -- op: add - path: /work/pvcSpecRequired/- - value: resources -- op: add - path: /work/pvcSpecProperties/resources/required - value: [requests] -- op: add - path: /work/pvcSpecProperties/resources/properties/requests/required - value: [storage] - -# Replace PVCs throughout the CRD. -- op: copy - from: /work/pvcSpecProperties - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/dataVolumeClaimSpec/properties -- op: copy - from: /work/pvcSpecRequired - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/dataVolumeClaimSpec/required -- op: copy - from: /work/pvcSpecProperties - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/walVolumeClaimSpec/properties -- op: copy - from: /work/pvcSpecRequired - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/walVolumeClaimSpec/required -- op: copy - from: /work/pvcSpecProperties - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/backups/properties/pgbackrest/properties/repos/items/properties/volume/properties/volumeClaimSpec/properties -- op: copy - from: /work/pvcSpecRequired - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/backups/properties/pgbackrest/properties/repos/items/properties/volume/properties/volumeClaimSpec/required - -# Remove the temporary workspace. -- { op: remove, path: /work } diff --git a/build/postgres-operator/Dockerfile b/build/postgres-operator/Dockerfile index a65ae04f22..69c5953761 100644 --- a/build/postgres-operator/Dockerfile +++ b/build/postgres-operator/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi8/ubi-micro +FROM registry.access.redhat.com/ubi8/ubi-minimal COPY licenses /licenses @@ -6,8 +6,7 @@ COPY bin/postgres-operator /usr/local/bin RUN mkdir -p /opt/crunchy/conf -COPY hack/tools/pgmonitor/postgres_exporter/common /opt/crunchy/conf -COPY hack/tools/pgmonitor/postgres_exporter/linux/queries_backrest.yml /opt/crunchy/conf +COPY hack/tools/queries /opt/crunchy/conf RUN chgrp -R 0 /opt/crunchy/conf && chmod -R g=u opt/crunchy/conf diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index 4a2ea7e56b..b2f8ae49b6 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -1,41 +1,38 @@ -package main - -/* -Copyright 2017 - 2023 Crunchy Data Solutions, Inc. 
-Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +package main import ( + "context" + "fmt" "net/http" "os" + "strconv" "strings" + "time" + "unicode" - "github.com/go-logr/logr" "go.opentelemetry.io/otel" + "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/discovery" "k8s.io/client-go/rest" - cruntime "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/healthz" "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/bridge/crunchybridgecluster" "github.com/crunchydata/postgres-operator/internal/controller/pgupgrade" "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/controller/standalone_pgadmin" + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/internal/upgradecheck" - "github.com/crunchydata/postgres-operator/internal/util" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) var versionString string @@ -48,18 +45,83 @@ func assertNoError(err error) { } func initLogging() { - // Configure a singleton that treats logr.Logger.V(1) as logrus.DebugLevel. + // Configure a singleton that treats logging.Logger.V(1) as logrus.DebugLevel. var verbosity int if strings.EqualFold(os.Getenv("CRUNCHY_DEBUG"), "true") { verbosity = 1 } logging.SetLogSink(logging.Logrus(os.Stdout, versionString, 1, verbosity)) + + global := logging.FromContext(context.Background()) + runtime.SetLogger(global) +} + +//+kubebuilder:rbac:groups="coordination.k8s.io",resources="leases",verbs={get,create,update,watch} + +func initManager() (runtime.Options, error) { + log := logging.FromContext(context.Background()) + + options := runtime.Options{} + options.Cache.SyncPeriod = initialize.Pointer(time.Hour) + + options.HealthProbeBindAddress = ":8081" + + // Enable leader elections when configured with a valid Lease.coordination.k8s.io name. 
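Concretely, a "valid" name here is a DNS-1123 subdomain, the same rule the API server applies to Lease object names. The check can be exercised in isolation; a minimal sketch using the apimachinery validation helper that the code below also calls:

```go
// Shows which lease names pass the DNS-1123 subdomain rule used to gate
// leader election on PGO_CONTROLLER_LEASE_NAME.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	for _, name := range []string{"postgres-operator", "INVALID_NAME", "pgo.example.com"} {
		if errs := validation.IsDNS1123Subdomain(name); len(errs) > 0 {
			fmt.Printf("%q rejected: %v\n", name, errs) // uppercase and underscores fail
		} else {
			fmt.Printf("%q accepted\n", name)
		}
	}
}
```

This mirrors the TestInitManager cases added further down, where `INVALID_NAME` returns an error and leaves leader election off, while `valid-name` enables it.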
+ // - https://docs.k8s.io/concepts/architecture/leases + // - https://releases.k8s.io/v1.30.0/pkg/apis/coordination/validation/validation.go#L26 + if lease := os.Getenv("PGO_CONTROLLER_LEASE_NAME"); len(lease) > 0 { + if errs := validation.IsDNS1123Subdomain(lease); len(errs) > 0 { + return options, fmt.Errorf("value for PGO_CONTROLLER_LEASE_NAME is invalid: %v", errs) + } + + options.LeaderElection = true + options.LeaderElectionID = lease + options.LeaderElectionNamespace = os.Getenv("PGO_NAMESPACE") + } + + // Check PGO_TARGET_NAMESPACE for backwards compatibility with + // "singlenamespace" installations + singlenamespace := strings.TrimSpace(os.Getenv("PGO_TARGET_NAMESPACE")) + + // Check PGO_TARGET_NAMESPACES for non-cluster-wide, multi-namespace + // installations + multinamespace := strings.TrimSpace(os.Getenv("PGO_TARGET_NAMESPACES")) + + // Initialize DefaultNamespaces if any target namespaces are set + if len(singlenamespace) > 0 || len(multinamespace) > 0 { + options.Cache.DefaultNamespaces = map[string]runtime.CacheConfig{} + } + + if len(singlenamespace) > 0 { + options.Cache.DefaultNamespaces[singlenamespace] = runtime.CacheConfig{} + } + + if len(multinamespace) > 0 { + for _, namespace := range strings.FieldsFunc(multinamespace, func(c rune) bool { + return c != '-' && !unicode.IsLetter(c) && !unicode.IsNumber(c) + }) { + options.Cache.DefaultNamespaces[namespace] = runtime.CacheConfig{} + } + } + + options.Controller.GroupKindConcurrency = map[string]int{ + "PostgresCluster." + v1beta1.GroupVersion.Group: 2, + } + + if s := os.Getenv("PGO_WORKERS"); s != "" { + if i, err := strconv.Atoi(s); err == nil && i > 0 { + options.Controller.GroupKindConcurrency["PostgresCluster."+v1beta1.GroupVersion.Group] = i + } else { + log.Error(err, "PGO_WORKERS must be a positive number") + } + } + + return options, nil } func main() { - // Set any supplied feature gates; panic on any unrecognized feature gate - err := util.AddAndSetFeatureGates(os.Getenv("PGO_FEATURE_GATES")) - assertNoError(err) + // This context is canceled by SIGINT, SIGTERM, or by calling shutdown. + ctx, shutdown := context.WithCancel(runtime.SignalHandler()) otelFlush, err := initOpenTelemetry() assertNoError(err) @@ -67,15 +129,12 @@ func main() { initLogging() - // create a context that will be used to stop all controllers on a SIGTERM or SIGINT - ctx := cruntime.SetupSignalHandler() log := logging.FromContext(ctx) log.V(1).Info("debug flag set to true") - log.Info("feature gates enabled", - "PGO_FEATURE_GATES", os.Getenv("PGO_FEATURE_GATES")) - - cruntime.SetLogger(log) + features := feature.NewGate() + assertNoError(features.Set(os.Getenv("PGO_FEATURE_GATES"))) + log.Info("feature gates enabled", "PGO_FEATURE_GATES", features.String()) cfg, err := runtime.GetConfig() assertNoError(err) @@ -87,7 +146,18 @@ func main() { // deprecation warnings when using an older version of a resource for backwards compatibility). rest.SetDefaultWarningHandler(rest.NoWarnings{}) - mgr, err := runtime.CreateRuntimeManager(os.Getenv("PGO_TARGET_NAMESPACE"), cfg, false) + options, err := initManager() + assertNoError(err) + + // Add to the Context that Manager passes to Reconciler.Start, Runnable.Start, + // and eventually Reconciler.Reconcile. 
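One subtlety worth calling out from the namespace handling earlier in initManager: `strings.FieldsFunc` treats every rune other than a letter, a digit, or `-` as a separator, so PGO_TARGET_NAMESPACES accepts commas, spaces, semicolons, or newlines interchangeably. A self-contained sketch of just that split (`splitNamespaces` is a hypothetical helper name, not part of the operator):

```go
// Splits a PGO_TARGET_NAMESPACES-style value on any rune that cannot appear
// in a Kubernetes namespace name.
package main

import (
	"fmt"
	"strings"
	"unicode"
)

func splitNamespaces(raw string) []string {
	return strings.FieldsFunc(raw, func(c rune) bool {
		return c != '-' && !unicode.IsLetter(c) && !unicode.IsNumber(c)
	})
}

func main() {
	fmt.Println(splitNamespaces("ns-one, ns-two;ns-three\nns-four"))
	// [ns-one ns-two ns-three ns-four]
}
```

Likewise, a non-positive or non-integer PGO_WORKERS value is logged and the default PostgresCluster concurrency of 2 is kept, as the new tests below confirm.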
+ options.BaseContext = func() context.Context { + ctx := context.Background() + ctx = feature.NewContext(ctx, features) + return ctx + } + + mgr, err := runtime.NewManager(cfg, options) assertNoError(err) openshift := isOpenshift(cfg) @@ -95,10 +165,15 @@ func main() { log.Info("detected OpenShift environment") } + registrar, err := registration.NewRunner(os.Getenv("RSA_KEY"), os.Getenv("TOKEN_PATH"), shutdown) + assertNoError(err) + assertNoError(mgr.Add(registrar)) + token, _ := registrar.CheckToken() + // add all PostgreSQL Operator controllers to the runtime manager - addControllersToManager(mgr, openshift, log) + addControllersToManager(mgr, openshift, log, registrar) - if util.DefaultMutableFeatureGate.Enabled(util.BridgeIdentifiers) { + if features.Enabled(feature.BridgeIdentifiers) { constructor := func() *bridge.Client { client := bridge.NewClient(os.Getenv("PGO_BRIDGE_URL"), versionString) client.Transport = otelTransportWrapper()(http.DefaultTransport) @@ -113,12 +188,22 @@ func main() { if !upgradeCheckingDisabled { log.Info("upgrade checking enabled") // get the URL for the check for upgrades endpoint if set in the env - assertNoError(upgradecheck.ManagedScheduler(mgr, - openshift, os.Getenv("CHECK_FOR_UPGRADES_URL"), versionString)) + assertNoError( + upgradecheck.ManagedScheduler( + mgr, + openshift, + os.Getenv("CHECK_FOR_UPGRADES_URL"), + versionString, + token, + )) } else { log.Info("upgrade checking disabled") } + // Enable health probes + assertNoError(mgr.AddHealthzCheck("health", healthz.Ping)) + assertNoError(mgr.AddReadyzCheck("check", healthz.Ping)) + log.Info("starting controller runtime manager and will wait for signal to exit") assertNoError(mgr.Start(ctx)) @@ -127,18 +212,14 @@ func main() { // addControllersToManager adds all PostgreSQL Operator controllers to the provided controller // runtime manager. -func addControllersToManager(mgr manager.Manager, openshift bool, log logr.Logger) { +func addControllersToManager(mgr runtime.Manager, openshift bool, log logging.Logger, reg registration.Registration) { pgReconciler := &postgrescluster.Reconciler{ - Client: mgr.GetClient(), - IsOpenShift: openshift, - Owner: postgrescluster.ControllerName, - PGOVersion: versionString, - Recorder: mgr.GetEventRecorderFor(postgrescluster.ControllerName), - // TODO(tlandreth) Replace the contents of cpk_rsa_key.pub with a key from a - // Crunchy authorization server. 
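Stepping back to the `options.BaseContext` hook wired up above: it seeds the context that controller-runtime hands to every Runnable and, eventually, every Reconcile call, which is how the feature gate reaches reconcilers without a package-level global. The operator's internal `feature` package presumably follows the standard context-key shape; a generic, self-contained sketch of that pattern under that assumption:

```go
// Generic context-key pattern: stash a value in a base context once, read it
// back anywhere that context is threaded through.
package main

import (
	"context"
	"fmt"
)

type gateKey struct{} // unexported key type avoids collisions with other packages

func newContext(ctx context.Context, gates map[string]bool) context.Context {
	return context.WithValue(ctx, gateKey{}, gates)
}

func fromContext(ctx context.Context) map[string]bool {
	gates, _ := ctx.Value(gateKey{}).(map[string]bool)
	return gates
}

func main() {
	base := newContext(context.Background(), map[string]bool{"BridgeIdentifiers": true})
	fmt.Println(fromContext(base)["BridgeIdentifiers"]) // true
}
```

Reconcilers can then consult the gate with `features.Enabled(...)`-style lookups, as main() does for `feature.BridgeIdentifiers`, instead of re-reading PGO_FEATURE_GATES from the environment.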
- Registration: util.GetRegistration(os.Getenv("RSA_KEY"), os.Getenv("TOKEN_PATH"), log), - RegistrationURL: os.Getenv("REGISTRATION_URL"), - Tracer: otel.Tracer(postgrescluster.ControllerName), + Client: mgr.GetClient(), + IsOpenShift: openshift, + Owner: postgrescluster.ControllerName, + Recorder: mgr.GetEventRecorderFor(postgrescluster.ControllerName), + Registration: reg, + Tracer: otel.Tracer(postgrescluster.ControllerName), } if err := pgReconciler.SetupWithManager(mgr); err != nil { @@ -147,9 +228,10 @@ func addControllersToManager(mgr manager.Manager, openshift bool, log logr.Logge } upgradeReconciler := &pgupgrade.PGUpgradeReconciler{ - Client: mgr.GetClient(), - Owner: "pgupgrade-controller", - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Owner: "pgupgrade-controller", + Recorder: mgr.GetEventRecorderFor("pgupgrade-controller"), + Registration: reg, } if err := upgradeReconciler.SetupWithManager(mgr); err != nil { @@ -158,16 +240,35 @@ func addControllersToManager(mgr manager.Manager, openshift bool, log logr.Logge } pgAdminReconciler := &standalone_pgadmin.PGAdminReconciler{ - Client: mgr.GetClient(), - Owner: "pgadmin-controller", - Recorder: mgr.GetEventRecorderFor(naming.ControllerPGAdmin), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Owner: "pgadmin-controller", + Recorder: mgr.GetEventRecorderFor(naming.ControllerPGAdmin), + IsOpenShift: openshift, } if err := pgAdminReconciler.SetupWithManager(mgr); err != nil { log.Error(err, "unable to create PGAdmin controller") os.Exit(1) } + + constructor := func() bridge.ClientInterface { + client := bridge.NewClient(os.Getenv("PGO_BRIDGE_URL"), versionString) + client.Transport = otelTransportWrapper()(http.DefaultTransport) + return client + } + + crunchyBridgeClusterReconciler := &crunchybridgecluster.CrunchyBridgeClusterReconciler{ + Client: mgr.GetClient(), + Owner: "crunchybridgecluster-controller", + // TODO(crunchybridgecluster): recorder? + // Recorder: mgr.GetEventRecorderFor(naming...), + NewClient: constructor, + } + + if err := crunchyBridgeClusterReconciler.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create CrunchyBridgeCluster controller") + os.Exit(1) + } } func isOpenshift(cfg *rest.Config) bool { diff --git a/cmd/postgres-operator/main_test.go b/cmd/postgres-operator/main_test.go new file mode 100644 index 0000000000..f369ce6bd3 --- /dev/null +++ b/cmd/postgres-operator/main_test.go @@ -0,0 +1,118 @@ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "reflect" + "testing" + "time" + + "gotest.tools/v3/assert" + "gotest.tools/v3/assert/cmp" +) + +func TestInitManager(t *testing.T) { + t.Run("Defaults", func(t *testing.T) { + options, err := initManager() + assert.NilError(t, err) + + if assert.Check(t, options.Cache.SyncPeriod != nil) { + assert.Equal(t, *options.Cache.SyncPeriod, time.Hour) + } + + assert.Assert(t, options.HealthProbeBindAddress == ":8081") + + assert.DeepEqual(t, options.Controller.GroupKindConcurrency, + map[string]int{ + "PostgresCluster.postgres-operator.crunchydata.com": 2, + }) + + assert.Assert(t, options.Cache.DefaultNamespaces == nil) + assert.Assert(t, options.LeaderElection == false) + + { + options.Cache.SyncPeriod = nil + options.Controller.GroupKindConcurrency = nil + options.HealthProbeBindAddress = "" + + assert.Assert(t, reflect.ValueOf(options).IsZero(), + "expected remaining fields to be unset:\n%+v", options) + } + }) + + t.Run("PGO_CONTROLLER_LEASE_NAME", func(t *testing.T) { + t.Setenv("PGO_NAMESPACE", "test-namespace") + + t.Run("Invalid", func(t *testing.T) { + t.Setenv("PGO_CONTROLLER_LEASE_NAME", "INVALID_NAME") + + options, err := initManager() + assert.ErrorContains(t, err, "PGO_CONTROLLER_LEASE_NAME") + assert.ErrorContains(t, err, "invalid") + + assert.Assert(t, options.LeaderElection == false) + assert.Equal(t, options.LeaderElectionNamespace, "") + }) + + t.Run("Valid", func(t *testing.T) { + t.Setenv("PGO_CONTROLLER_LEASE_NAME", "valid-name") + + options, err := initManager() + assert.NilError(t, err) + assert.Assert(t, options.LeaderElection == true) + assert.Equal(t, options.LeaderElectionNamespace, "test-namespace") + assert.Equal(t, options.LeaderElectionID, "valid-name") + }) + }) + + t.Run("PGO_TARGET_NAMESPACE", func(t *testing.T) { + t.Setenv("PGO_TARGET_NAMESPACE", "some-such") + + options, err := initManager() + assert.NilError(t, err) + assert.Assert(t, cmp.Len(options.Cache.DefaultNamespaces, 1), + "expected only one configured namespace") + + assert.Assert(t, cmp.Contains(options.Cache.DefaultNamespaces, "some-such")) + }) + + t.Run("PGO_TARGET_NAMESPACES", func(t *testing.T) { + t.Setenv("PGO_TARGET_NAMESPACES", "some-such,another-one") + + options, err := initManager() + assert.NilError(t, err) + assert.Assert(t, cmp.Len(options.Cache.DefaultNamespaces, 2), + "expect two configured namespaces") + + assert.Assert(t, cmp.Contains(options.Cache.DefaultNamespaces, "some-such")) + assert.Assert(t, cmp.Contains(options.Cache.DefaultNamespaces, "another-one")) + }) + + t.Run("PGO_WORKERS", func(t *testing.T) { + t.Run("Invalid", func(t *testing.T) { + for _, v := range []string{"-3", "0", "3.14"} { + t.Setenv("PGO_WORKERS", v) + + options, err := initManager() + assert.NilError(t, err) + assert.DeepEqual(t, options.Controller.GroupKindConcurrency, + map[string]int{ + "PostgresCluster.postgres-operator.crunchydata.com": 2, + }) + } + }) + + t.Run("Valid", func(t *testing.T) { + t.Setenv("PGO_WORKERS", "19") + + options, err := initManager() + assert.NilError(t, err) + assert.DeepEqual(t, options.Controller.GroupKindConcurrency, + map[string]int{ + "PostgresCluster.postgres-operator.crunchydata.com": 19, + }) + }) + }) +} diff --git a/cmd/postgres-operator/open_telemetry.go b/cmd/postgres-operator/open_telemetry.go index 5d53d039a7..2c9eedc135 100644 --- a/cmd/postgres-operator/open_telemetry.go +++ b/cmd/postgres-operator/open_telemetry.go @@ -1,19 +1,8 @@ -package main - -/* -Copyright 2021 - 2023 Crunchy 
Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +package main import ( "context" diff --git a/config/README.md b/config/README.md index 87708d16ff..73d2e59e6f 100644 --- a/config/README.md +++ b/config/README.md @@ -1,16 +1,7 @@ @@ -19,9 +10,6 @@ - The `default` target installs the operator in the `postgres-operator` namespace and configures it to manage resources in all namespaces. -- The `singlenamespace` target installs the operator in the `postgres-operator` - namespace and configures it to manage resources in that same namespace. - diff --git a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml new file mode 100644 index 0000000000..82db84b466 --- /dev/null +++ b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml @@ -0,0 +1,290 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: crunchybridgeclusters.postgres-operator.crunchydata.com +spec: + group: postgres-operator.crunchydata.com + names: + kind: CrunchyBridgeCluster + listKind: CrunchyBridgeClusterList + plural: crunchybridgeclusters + singular: crunchybridgecluster + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: CrunchyBridgeCluster is the Schema for the crunchybridgeclusters + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + CrunchyBridgeClusterSpec defines the desired state of CrunchyBridgeCluster + to be managed by Crunchy Data Bridge + properties: + clusterName: + description: The name of the cluster + maxLength: 50 + minLength: 5 + pattern: ^[A-Za-z][A-Za-z0-9\-_ ]*[A-Za-z0-9]$ + type: string + isHa: + description: |- + Whether the cluster is high availability, + meaning that it has a secondary it can fail over to quickly + in case the primary becomes unavailable. + type: boolean + isProtected: + description: |- + Whether the cluster is protected. Protected clusters can't be destroyed until + their protected flag is removed + type: boolean + majorVersion: + description: |- + The ID of the cluster's major Postgres version. 
+ Currently Bridge offers 13-17 + maximum: 17 + minimum: 13 + type: integer + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + plan: + description: The ID of the cluster's plan. Determines instance, CPU, + and memory. + type: string + provider: + description: |- + The cloud provider where the cluster is located. + Currently Bridge offers aws, azure, and gcp only + enum: + - aws + - azure + - gcp + type: string + x-kubernetes-validations: + - message: immutable + rule: self == oldSelf + region: + description: The provider region where the cluster is located. + type: string + x-kubernetes-validations: + - message: immutable + rule: self == oldSelf + roles: + description: |- + Roles for which to create Secrets that contain their credentials which + are retrieved from the Bridge API. An empty list creates no role secrets. + Removing a role from this list does NOT drop the role nor revoke their + access, but it will delete that role's secret from the kube cluster. + items: + properties: + name: + description: |- + Name of the role within Crunchy Bridge. + More info: https://docs.crunchybridge.com/concepts/users + type: string + secretName: + description: The name of the Secret that will hold the role + credentials. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + - secretName + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + secret: + description: The name of the secret containing the API key and team + id + type: string + storage: + anyOf: + - type: integer + - type: string + description: |- + The amount of storage available to the cluster in gigabytes. + The amount must be an integer, followed by Gi (gibibytes) or G (gigabytes) to match Kubernetes conventions. + If the amount is given in Gi, we round to the nearest G value. + The minimum value allowed by Bridge is 10 GB. + The maximum value allowed by Bridge is 65535 GB. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - clusterName + - isHa + - majorVersion + - plan + - provider + - region + - secret + - storage + type: object + status: + description: CrunchyBridgeClusterStatus defines the observed state of + CrunchyBridgeCluster + properties: + conditions: + description: conditions represent the observations of postgres cluster's + current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + host: + description: The Hostname of the postgres cluster in Bridge, provided + by Bridge API and null until then. + type: string + id: + description: The ID of the postgres cluster in Bridge, provided by + Bridge API and null until then. + type: string + isHa: + description: |- + Whether the cluster is high availability, meaning that it has a secondary it can fail + over to quickly in case the primary becomes unavailable. + type: boolean + isProtected: + description: |- + Whether the cluster is protected. Protected clusters can't be destroyed until + their protected flag is removed + type: boolean + majorVersion: + description: The cluster's major Postgres version. + type: integer + name: + description: The name of the cluster in Bridge. + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + on which the status was based. + format: int64 + minimum: 0 + type: integer + ongoingUpgrade: + description: The cluster upgrade as represented by Bridge + items: + properties: + flavor: + type: string + starting_from: + type: string + state: + type: string + required: + - flavor + - starting_from + - state + type: object + type: array + plan: + description: The ID of the cluster's plan. Determines instance, CPU, + and memory. + type: string + responses: + description: Most recent, raw responses from Bridge API + type: object + x-kubernetes-preserve-unknown-fields: true + state: + description: State of cluster in Bridge. + type: string + storage: + anyOf: + - type: integer + - type: string + description: The amount of storage available to the cluster. 
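Both the spec and status `storage` fields use the Kubernetes int-or-string quantity convention enforced by the pattern below, and such values parse with the apimachinery resource package. A small sketch of that parsing; the round-to-nearest-G behavior and the 10–65535 GB bounds described in the spec are Bridge-side rules and are not reproduced here:

```go
// Parses int-or-string Kubernetes quantities like the CRD storage fields.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	for _, s := range []string{"10Gi", "100G", "512"} {
		q := resource.MustParse(s)
		fmt.Printf("%s -> %d bytes\n", s, q.Value()) // bare integers parse as bytes
	}
}
```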
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml index f0dae5f9c3..da729cfaf2 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -1,12 +1,9 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - labels: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest + controller-gen.kubebuilder.io/version: v0.16.4 name: pgadmins.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -20,17 +17,22 @@ spec: - name: v1beta1 schema: openAPIV3Schema: - description: PGAdmin is the Schema for the pgadmins API + description: PGAdmin is the Schema for the PGAdmin API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -38,30 +40,29 @@ spec: description: PGAdminSpec defines the desired state of PGAdmin properties: affinity: - description: 'Scheduling constraints of the PGAdmin pod. More info: - https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the PGAdmin pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. 
for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) with the - highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a no-op). - A null preferred scheduling term matches no objects (i.e. - is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated with the @@ -71,75 +72,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. 
properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. @@ -150,116 +148,115 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. 
for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -270,137 +267,161 @@ spec: with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces field. - null selector and null or empty namespaces list - means "this pod's namespace". An empty selector - ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. The - term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces list - and null namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -408,146 +429,177 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may - not try to eventually evict the pod from its node. When - there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms - must be satisfied. 
+ description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied to the - union of the namespaces selected by this field and - the ones listed in the namespaces field. null selector - and null or empty namespaces list means "this pod's - namespace". An empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. 
If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list of namespace - names that the term applies to. The term is applied - to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. null or - empty namespaces list and null namespaceSelector means - "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules (e.g. @@ -555,16 +607,15 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the anti-affinity expressions specified - by this field, but it may choose a node that violates one - or more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. 
for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -575,137 +626,161 @@ spec: with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces field. - null selector and null or empty namespaces list - means "this pod's namespace". An empty selector - ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. The - term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces list - and null namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -713,176 +788,324 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will - not be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its - node. When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. 
+                        description: |-
+                          If the anti-affinity requirements specified by this field are not met at
+                          scheduling time, the pod will not be scheduled onto the node.
+                          If the anti-affinity requirements specified by this field cease to be met
+                          at some point during pod execution (e.g. due to a pod label update), the
+                          system may or may not try to eventually evict the pod from its node.
+                          When there are multiple elements, the lists of nodes corresponding to each
+                          podAffinityTerm are intersected, i.e. all terms must be satisfied.
                         items:
-                          description: Defines a set of pods (namely those matching
-                            the labelSelector relative to the given namespace(s))
-                            that this pod should be co-located (affinity) or not co-located
-                            (anti-affinity) with, where co-located is defined as running
-                            on a node whose value of the label with key <topologyKey>
-                            matches that of any node on which a pod of the set of
-                            pods is running
+                          description: |-
+                            Defines a set of pods (namely those matching the labelSelector
+                            relative to the given namespace(s)) that this pod should be
+                            co-located (affinity) or not co-located (anti-affinity) with,
+                            where co-located is defined as running on a node whose value of
+                            the label with key <topologyKey> matches that of any node on which
+                            a pod of the set of pods is running
                           properties:
                             labelSelector:
-                              description: A label query over a set of resources,
-                                in this case pods.
+                              description: |-
+                                A label query over a set of resources, in this case pods.
+                                If it's null, this PodAffinityTerm matches with no Pods.
                               properties:
                                 matchExpressions:
                                   description: matchExpressions is a list of label
                                     selector requirements. The requirements are ANDed.
                                   items:
-                                    description: A label selector requirement is a
-                                      selector that contains values, a key, and an
-                                      operator that relates the key and values.
+                                    description: |-
+                                      A label selector requirement is a selector that contains values, a key, and an operator that
+                                      relates the key and values.
                                     properties:
                                       key:
                                         description: key is the label key that the
                                           selector applies to.
                                         type: string
                                       operator:
-                                        description: operator represents a key's relationship
-                                          to a set of values. Valid operators are
-                                          In, NotIn, Exists and DoesNotExist.
+                                        description: |-
+                                          operator represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists and DoesNotExist.
                                         type: string
                                       values:
-                                        description: values is an array of string
-                                          values. If the operator is In or NotIn,
-                                          the values array must be non-empty. If the
-                                          operator is Exists or DoesNotExist, the
-                                          values array must be empty. This array is
-                                          replaced during a strategic merge patch.
+                                        description: |-
+                                          values is an array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. This array is replaced during a strategic
+                                          merge patch.
                                         items:
                                           type: string
                                         type: array
+                                        x-kubernetes-list-type: atomic
                                     required:
                                     - key
                                     - operator
                                     type: object
                                   type: array
+                                  x-kubernetes-list-type: atomic
                                 matchLabels:
                                   additionalProperties:
                                     type: string
-                                  description: matchLabels is a map of {key,value}
-                                    pairs. A single {key,value} in the matchLabels
-                                    map is equivalent to an element of matchExpressions,
-                                    whose key field is "key", the operator is "In",
-                                    and the values array contains only "value". The
-                                    requirements are ANDed.
+                                  description: |-
+                                    matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied to the - union of the namespaces selected by this field and - the ones listed in the namespaces field. null selector - and null or empty namespaces list means "this pod's - namespace". An empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. 
If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list of namespace - names that the term applies to. The term is applied - to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. null or - empty namespaces list and null namespaceSelector means - "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object config: - description: Configuration settings for the pgAdmin process. Changes - to any of these values will be loaded without validation. Be careful, - as you may put pgAdmin into an unusable state. + description: |- + Configuration settings for the pgAdmin process. Changes to any of these + values will be loaded without validation. Be careful, as + you may put pgAdmin into an unusable state. properties: + configDatabaseURI: + description: |- + A Secret containing the value for the CONFIG_DATABASE_URI setting. + More info: https://www.pgadmin.org/docs/pgadmin4/latest/external_database.html + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic files: - description: Files allows the user to mount projected volumes - into the pgAdmin container so that files can be referenced by - pgAdmin as needed. + description: |- + Files allows the user to mount projected volumes into the pgAdmin + container so that files can be referenced by pgAdmin as needed. items: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. 
If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value pair - in the Data field of the referenced ConfigMap will - be projected into the volume as a file whose name - is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If a - key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' - path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -891,22 +1114,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both octal - and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume - defaultMode will be used. This might be in conflict - with other options that affect the file mode, - like fsGroup, and the result can be other mode - bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -914,14 +1135,22 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -934,8 +1163,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of the - pod: only annotations, labels, name and namespace - are supported.' + pod: only annotations, labels, name, namespace + and uid are supported.' properties: apiVersion: description: Version of the schema the FieldPath @@ -948,17 +1177,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used to set - permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal - values, JSON requires decimal values for mode - bits. If not specified, the volume defaultMode - will be used. This might be in conflict with - other options that affect the file mode, like - fsGroup, and the result can be other mode bits - set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -969,10 +1196,9 @@ spec: path must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: description: 'Container name: required for @@ -992,26 +1218,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value pair - in the Data field of the referenced Secret will be - projected into the volume as a file whose name is - the key and content is the value. If specified, the - listed keys will be projected into the specified paths, - and unlisted keys will not be present. If a key is - specified which is not present in the Secret, the - volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' - path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. 
Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -1020,22 +1246,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both octal - and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume - defaultMode will be used. This might be in conflict - with other options that affect the file mode, - like fsGroup, and the result can be other mode - bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -1043,55 +1267,76 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience of the - token. A recipient of a token must identify itself - with an identifier specified in the audience of the - token, and otherwise should reject the token. The - audience defaults to the identifier of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested duration - of validity of the service account token. As the token - approaches expiration, the kubelet volume plugin will - proactively rotate the service account token. The - kubelet will start trying to rotate the token if the - token is older than 80 percent of its time to live - or if the token is older than 24 hours.Defaults to - 1 hour and must be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. 
As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the mount - point of the file to project the token into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path type: object type: object type: array + gunicorn: + description: |- + Settings for the gunicorn server. + More info: https://docs.gunicorn.org/en/latest/settings.html + type: object + x-kubernetes-preserve-unknown-fields: true ldapBindPassword: - description: 'A Secret containing the value for the LDAP_BIND_PASSWORD - setting. More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html' + description: |- + A Secret containing the value for the LDAP_BIND_PASSWORD setting. + More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html properties: key: description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret or its key must be @@ -1100,37 +1345,44 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic settings: - description: 'Settings for the pgAdmin server process. Keys should - be uppercase and values must be constants. More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html' + description: |- + Settings for the pgAdmin server process. Keys should be uppercase and + values must be constants. + More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html type: object x-kubernetes-preserve-unknown-fields: true type: object dataVolumeClaimSpec: - description: 'Defines a PersistentVolumeClaim for pgAdmin data. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes' + description: |- + Defines a PersistentVolumeClaim for pgAdmin data. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes properties: accessModes: - description: 'accessModes contains the desired access modes the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to specify either: + description: |- + dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the provisioner - or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified - data source. 
If the AnyVolumeDataSource feature gate is enabled, - this field will always have the same contents as the DataSourceRef - field.' + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -1142,32 +1394,38 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object from which to - populate the volume with data, if a non-empty volume is desired. - This may be any local object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator - or dynamic provisioner. This field will replace the functionality - of the DataSource field and as such if both fields are non-empty, - they must have the same value. For backwards compatibility, - both fields (DataSource and DataSourceRef) will be set to the - same value automatically if one of them is empty and the other - is non-empty. There are two important differences between DataSource - and DataSourceRef: * While DataSource only allows two specific - types of objects, DataSourceRef allows any non-core object, - as well as PersistentVolumeClaim objects. * While DataSource - ignores disallowed values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed value is - specified. (Beta) Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. 
+ There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -1175,16 +1433,23 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name type: object resources: - description: 'resources represents the minimum resources the volume - should have. If RecoverVolumeExpansionFailure feature is enabled - users are allowed to specify resource requirements that are - lower than previous value but must still be higher than capacity - recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -1193,8 +1458,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -1203,10 +1469,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. 
If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object selector: @@ -1217,51 +1484,69 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of the StorageClass - required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the PersistentVolume @@ -1272,25 +1557,36 @@ spec: description: The image name to use for pgAdmin instance. type: string imagePullPolicy: - description: 'ImagePullPolicy is used to determine when Kubernetes - will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy' + description: |- + ImagePullPolicy is used to determine when Kubernetes will attempt to + pull (download) container images. + More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy enum: - Always - Never - IfNotPresent type: string imagePullSecrets: - description: The image pull secrets used to pull from a private registry. + description: |- + The image pull secrets used to pull from a private registry. Changing this value causes all running PGAdmin pods to restart. https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string type: object + x-kubernetes-map-type: atomic type: array metadata: description: Metadata contains metadata for custom resources @@ -1305,12 +1601,39 @@ spec: type: object type: object priorityClassName: - description: 'Priority class name for the PGAdmin pod. Changing this - value causes PGAdmin pod to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the PGAdmin pod. Changing this + value causes PGAdmin pod to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string resources: description: Resource requirements for the PGAdmin container. 
properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -1318,8 +1641,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -1328,116 +1652,188 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object serverGroups: - description: ServerGroups for importing PostgresClusters to pgAdmin. - To create a pgAdmin with no selectors, leave this field empty. A - pgAdmin created with no `ServerGroups` will not automatically add - any servers through discovery. PostgresClusters can still be added - manually. + description: |- + ServerGroups for importing PostgresClusters to pgAdmin. + To create a pgAdmin with no selectors, leave this field empty. + A pgAdmin created with no `ServerGroups` will not automatically + add any servers through discovery. PostgresClusters can still be + added manually. items: properties: name: - description: The name for the ServerGroup in pgAdmin. Must be - unique in the pgAdmin's ServerGroups since it becomes the - ServerGroup name in pgAdmin. + description: |- + The name for the ServerGroup in pgAdmin. + Must be unique in the pgAdmin's ServerGroups since it becomes the ServerGroup name in pgAdmin. + type: string + postgresClusterName: + description: PostgresClusterName selects one cluster to add + to pgAdmin by name. type: string postgresClusterSelector: - description: PostgresClusterSelector selects clusters to dynamically - add to pgAdmin by matching labels. An empty selector like - `{}` will select ALL clusters in the namespace. 
+ description: |- + PostgresClusterSelector selects clusters to dynamically add to pgAdmin by matching labels. + An empty selector like `{}` will select ALL clusters in the namespace. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic required: - name - - postgresClusterSelector type: object + x-kubernetes-validations: + - message: exactly one of "postgresClusterName" or "postgresClusterSelector" + is required + rule: '[has(self.postgresClusterName),has(self.postgresClusterSelector)].exists_one(x,x)' type: array + serviceName: + description: |- + ServiceName will be used as the name of a ClusterIP service pointing + to the pgAdmin pod and port. If the service already exists, PGO will + update the service. For more information about services reference + the Kubernetes and CrunchyData documentation. + https://kubernetes.io/docs/concepts/services-networking/service/ + type: string tolerations: - description: 'Tolerations of the PGAdmin pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of the PGAdmin pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . 
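The new `x-kubernetes-validations` rule above enforces that each server group sets exactly one of `postgresClusterName` or `postgresClusterSelector`. A hedged sketch of the two styles side by side, with hypothetical cluster, label, and service names:

```yaml
spec:
  serviceName: pgadmin-service        # optional ClusterIP Service managed by PGO
  serverGroups:
    # Exactly one of postgresClusterName or postgresClusterSelector per entry,
    # per the CEL exists_one rule above.
    - name: single-cluster
      postgresClusterName: hippo      # hypothetical cluster name
    - name: labeled-clusters
      postgresClusterSelector:
        matchLabels:
          environment: production     # hypothetical label
    - name: all-clusters
      postgresClusterSelector: {}     # empty selector selects ALL clusters in the namespace
```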
+ description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match all - values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship to the - value. Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod - can tolerate all taints of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period of time - the toleration (which must be of effect NoExecute, otherwise - this field is ignored) tolerates the taint. By default, it - is not set, which means tolerate the taint forever (do not - evict). Zero and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + users: + description: |- + pgAdmin users that are managed via the PGAdmin spec. Users can still + be added via the pgAdmin GUI, but those users will not show up here. + items: + properties: + passwordRef: + description: A reference to the secret that holds the user's + password. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
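For the toleration fields above, a short illustrative snippet; the taint keys and values are hypothetical, not from this patch:

```yaml
spec:
  tolerations:
    - key: dedicated            # hypothetical taint key
      operator: Equal           # Exists or Equal; defaults to Equal
      value: pgadmin
      effect: NoSchedule        # NoSchedule, PreferNoSchedule, or NoExecute
    - key: maintenance
      operator: Exists          # Exists acts as a wildcard for value
      effect: NoExecute
      tolerationSeconds: 3600   # evict after one hour instead of tolerating forever
```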
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + role: + description: |- + Role determines whether the user has admin privileges or not. + Defaults to User. Valid options are Administrator and User. + enum: + - Administrator + - User type: string + username: + description: |- + The username for User in pgAdmin. + Must be unique in the pgAdmin's users list. + type: string + required: + - passwordRef + - username type: object type: array + x-kubernetes-list-map-keys: + - username + x-kubernetes-list-type: map required: - dataVolumeClaimSpec type: object @@ -1445,47 +1841,39 @@ spec: description: PGAdminStatus defines the observed state of PGAdmin properties: conditions: - description: 'conditions represent the observations of pgadmin''s - current state. Known .status.conditions.type are: "PersistentVolumeResizing", - "Progressing", "ProxyAvailable"' + description: |- + conditions represent the observations of pgAdmin's current state. + Known .status.conditions.type is: "PersistentVolumeResizing" items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a foo's - current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
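The new `users` list above is a map keyed by `username`, and each entry requires a `passwordRef`. A sketch under assumed Secret names:

```yaml
spec:
  users:
    - username: admin@example.com        # must be unique in the users list
      role: Administrator                # Administrator or User; defaults to User
      passwordRef:
        name: pgadmin-admin-password     # hypothetical Secret
        key: password                    # key is the only required field of the reference
    - username: reader@example.com
      passwordRef:
        name: pgadmin-reader-password
        key: password
```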
format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -1500,10 +1888,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -1518,6 +1902,14 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + imageSHA: + description: ImageSHA represents the image SHA for the container running + pgAdmin. + type: string + majorVersion: + description: MajorVersion represents the major version of the running + pgAdmin. + type: integer observedGeneration: description: observedGeneration represents the .metadata.generation on which the status was based. diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml index b35c209b37..4ae831cfc7 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml @@ -1,12 +1,9 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - labels: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest + controller-gen.kubebuilder.io/version: v0.16.4 name: pgupgrades.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -23,14 +20,19 @@ spec: description: PGUpgrade is the Schema for the pgupgrades API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -38,30 +40,29 @@ spec: description: PGUpgradeSpec defines the desired state of PGUpgrade properties: affinity: - description: 'Scheduling constraints of the PGUpgrade pod. More info: - https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the PGUpgrade pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) with the - highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a no-op). - A null preferred scheduling term matches no objects (i.e. - is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated with the @@ -71,75 +72,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. @@ -150,116 +148,115 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -270,137 +267,161 @@ spec: with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces field. - null selector and null or empty namespaces list - means "this pod's namespace". An empty selector - ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. The - term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces list - and null namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
type: string required: - topologyKey type: object weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -408,146 +429,177 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may - not try to eventually evict the pod from its node. When - there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms - must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. 
+ description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied to the - union of the namespaces selected by this field and - the ones listed in the namespaces field. null selector - and null or empty namespaces list means "this pod's - namespace". An empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list of namespace - names that the term applies to. The term is applied - to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. null or - empty namespaces list and null namespaceSelector means - "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. 
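Pulling the PGUpgrade affinity schema together, a minimal sketch that combines node affinity with a required pod-affinity term. The v1beta1 API version and all labels are assumptions, and other required PGUpgrade fields are omitted to keep the focus on affinity:

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1  # assumed version
kind: PGUpgrade
metadata:
  name: example-upgrade
spec:
  # (other required PGUpgrade fields omitted; this sketch shows only affinity)
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:               # terms are ORed
          - matchExpressions:            # requirements within a term are ANDed
              - key: kubernetes.io/arch
                operator: In
                values: [amd64]
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/cluster: hippo  # hypothetical label
          namespaceSelector: {}          # empty selector matches all namespaces
          topologyKey: topology.kubernetes.io/zone              # required, never empty
```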
type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules (e.g. @@ -555,16 +607,15 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the anti-affinity expressions specified - by this field, but it may choose a node that violates one - or more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -575,137 +626,161 @@ spec: with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces field. - null selector and null or empty namespaces list - means "this pod's namespace". An empty selector - ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. 
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. The - term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces list - and null namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -713,176 +788,218 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will - not be scheduled onto the node. 
If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its - node. When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied to the - union of the namespaces selected by this field and - the ones listed in the namespaces field. null selector - and null or empty namespaces list means "this pod's - namespace". An empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. 
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list of namespace - names that the term applies to. The term is applied - to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. null or - empty namespaces list and null namespaceSelector means - "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object fromPostgresVersion: description: The major version of PostgreSQL before the upgrade. - maximum: 16 - minimum: 10 + maximum: 17 + minimum: 11 type: integer image: description: The image name to use for major PostgreSQL upgrades. type: string imagePullPolicy: - description: 'ImagePullPolicy is used to determine when Kubernetes - will attempt to pull (download) container images. 
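# This hunk raises the supported upgrade range: fromPostgresVersion and toPostgresVersion now
# validate against 11-17 instead of 10-16. A sketch of a spec that passes the new bounds; the
# resource and cluster names are hypothetical:
#
#   apiVersion: postgres-operator.crunchydata.com/v1beta1
#   kind: PGUpgrade
#   metadata:
#     name: example-upgrade               # hypothetical name
#   spec:
#     postgresClusterName: example        # assumed field; defined elsewhere in this CRD
#     fromPostgresVersion: 16             # minimum accepted is now 11
#     toPostgresVersion: 17               # maximum accepted is now 17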
More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy' + description: |- + ImagePullPolicy is used to determine when Kubernetes will attempt to + pull (download) container images. + More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy enum: - Always - Never - IfNotPresent type: string imagePullSecrets: - description: The image pull secrets used to pull from a private registry. + description: |- + The image pull secrets used to pull from a private registry. Changing this value causes all running PGUpgrade pods to restart. https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string type: object + x-kubernetes-map-type: atomic type: array metadata: description: Metadata contains metadata for custom resources @@ -901,12 +1018,39 @@ spec: minLength: 1 type: string priorityClassName: - description: 'Priority class name for the PGUpgrade pod. Changing - this value causes PGUpgrade pod to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the PGUpgrade pod. Changing this + value causes PGUpgrade pod to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string resources: description: Resource requirements for the PGUpgrade container. properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -914,8 +1058,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -924,59 +1069,61 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object toPostgresImage: - description: The image name to use for PostgreSQL containers after - upgrade. When omitted, the value comes from an operator environment - variable. + description: |- + The image name to use for PostgreSQL containers after upgrade. + When omitted, the value comes from an operator environment variable. type: string toPostgresVersion: description: The major version of PostgreSQL to be upgraded to. - maximum: 16 - minimum: 10 + maximum: 17 + minimum: 11 type: integer tolerations: - description: 'Tolerations of the PGUpgrade pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of the PGUpgrade pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match all - values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship to the - value. Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod - can tolerate all taints of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. 
type: string tolerationSeconds: - description: TolerationSeconds represents the period of time - the toleration (which must be of effect NoExecute, otherwise - this field is ignored) tolerates the taint. By default, it - is not set, which means tolerate the taint forever (do not - evict). Zero and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -992,43 +1139,35 @@ spec: description: conditions represent the observations of PGUpgrade's current state. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a foo's - current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
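# A combined sketch of the resources and tolerations schemas above; the taint keys and sizes
# are hypothetical:
#
#   resources:
#     requests:
#       cpu: 500m
#       memory: 1Gi                       # requests cannot exceed limits
#     limits:
#       memory: 1Gi
#   tolerations:
#   - key: example.com/database           # hypothetical taint key
#     operator: Exists                    # Exists acts as a wildcard for value
#     effect: NoSchedule
#   - key: maintenance
#     operator: Equal
#     value: "true"
#     effect: NoExecute
#     tolerationSeconds: 3600             # evict one hour after the taint appears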
format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -1043,10 +1182,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 2e66275521..6f9dd40f02 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -1,12 +1,9 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - labels: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest + controller-gen.kubebuilder.io/version: v0.16.4 name: postgresclusters.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -23,14 +20,19 @@ spec: description: PostgresCluster is the Schema for the postgresclusters API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
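# The PGUpgrade status block above uses the standard metav1.Condition shape. A hypothetical
# populated entry, for reference (the type and reason strings are illustrative, not values the
# operator is documented to emit):
#
#   status:
#     conditions:
#     - type: Progressing                       # CamelCase, per the pattern above
#       status: "True"
#       reason: PGUpgradeRunning                # CamelCase, non-empty
#       message: upgrading from PostgreSQL 16 to 17
#       lastTransitionTime: "2024-06-01T00:00:00Z"
#       observedGeneration: 2                   # stale if .metadata.generation is newer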
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -44,30 +46,121 @@ spec: description: pgBackRest archive configuration properties: configuration: - description: 'Projected volumes containing custom pgBackRest - configuration. These files are mounted under "/etc/pgbackrest/conf.d" - alongside any pgBackRest configuration generated by the - PostgreSQL Operator: https://pgbackrest.org/configuration.html' + description: |- + Projected volumes containing custom pgBackRest configuration. These files are mounted + under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the + PostgreSQL Operator: + https://pgbackrest.org/configuration.html items: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. 
If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the ConfigMap, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -76,39 +169,43 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. Must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON - requires decimal values for mode bits. If - not specified, the volume defaultMode will - be used. This might be in conflict with - other options that affect the file mode, - like fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of - the file to map the key to. May not be an - absolute path. May not contain the path - element '..'. May not start with the string - '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. 
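# clusterTrustBundle is a new, alpha projection (gated by ClusterTrustBundleProjection). A
# sketch of signer-based selection under the new schema; the signer name and labels are
# hypothetical:
#
#   configuration:
#   - clusterTrustBundle:
#       signerName: example.com/internal-ca   # mutually exclusive with name
#       labelSelector:
#         matchLabels:
#           trust: postgres                   # hypothetical label
#       optional: true                        # don't block pod startup if nothing matches
#       path: ca-bundle.pem                   # relative path inside the projected volume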
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -123,8 +220,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' + the pod: only annotations, labels, name, + namespace and uid are supported.' properties: apiVersion: description: Version of the schema the @@ -138,17 +235,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used to - set permissions on this file, must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -159,10 +254,9 @@ spec: the relative path must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: description: 'Container name: required @@ -183,27 +277,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the Secret, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. 
If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -212,66 +305,68 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. Must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON - requires decimal values for mode bits. If - not specified, the volume defaultMode will - be used. This might be in conflict with - other options that affect the file mode, - like fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of - the file to map the key to. May not be an - absolute path. May not contain the path - element '..'. May not start with the string - '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience of - the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account token. - As the token approaches expiration, the kubelet - volume plugin will proactively rotate the service - account token. The kubelet will start trying to - rotate the token if the token is older than 80 - percent of its time to live or if the token is - older than 24 hours.Defaults to 1 hour and must - be at least 10 minutes. 
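# Taken together, the configMap and secret projections above are how custom pgBackRest settings
# land under /etc/pgbackrest/conf.d. A sketch with hypothetical object names:
#
#   backups:
#     pgbackrest:
#       global:
#         repo1-retention-full: "2"           # example pgBackRest option
#       configuration:
#       - secret:
#           name: pgbackrest-s3-creds         # hypothetical Secret
#           items:
#           - key: s3.conf
#             path: s3.conf                   # relative; may not contain '..'
#             mode: 288                       # decimal for octal 0440
#       - configMap:
#           name: pgbackrest-extra-config     # hypothetical ConfigMap
#           optional: true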
+ description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the mount - point of the file to project the token into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -281,48 +376,46 @@ spec: global: additionalProperties: type: string - description: 'Global pgBackRest configuration settings. These - settings are included in the "global" section of the pgBackRest - configuration generated by the PostgreSQL Operator, and - then mounted under "/etc/pgbackrest/conf.d": https://pgbackrest.org/configuration.html' + description: |- + Global pgBackRest configuration settings. These settings are included in the "global" + section of the pgBackRest configuration generated by the PostgreSQL Operator, and then + mounted under "/etc/pgbackrest/conf.d": + https://pgbackrest.org/configuration.html type: object image: - description: The image name to use for pgBackRest containers. Utilized - to run pgBackRest repository hosts and backups. The image - may also be set using the RELATED_IMAGE_PGBACKREST environment - variable + description: |- + The image name to use for pgBackRest containers. Utilized to run + pgBackRest repository hosts and backups. The image may also be set using + the RELATED_IMAGE_PGBACKREST environment variable type: string jobs: description: Jobs field allows configuration for all backup jobs properties: affinity: - description: 'Scheduling constraints of pgBackRest backup - Job pods. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of pgBackRest backup Job pods. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) - with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling - term matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -332,85 +425,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. 
- If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in @@ -422,112 +502,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to an update), the system - may or may not try to eventually evict the pod - from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector - term matches no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
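# A sketch of the required node affinity schema described here, as it might be applied to
# backup job pods; the numeric node label is hypothetical:
#
#   jobs:
#     affinity:
#       nodeAffinity:
#         requiredDuringSchedulingIgnoredDuringExecution:
#           nodeSelectorTerms:                # terms are ORed
#           - matchExpressions:               # requirements in a term are ANDed
#             - key: kubernetes.io/arch
#               operator: In
#               values: [amd64, arm64]
#             - key: example.com/local-ssds   # hypothetical integer-valued label
#               operator: Gt
#               values: ["1"]                 # Gt/Lt take a single element, read as an integer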
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules @@ -535,20 +603,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most - preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -559,19 +623,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -579,66 +642,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -646,76 +725,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. 
A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -723,41 +787,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to a pod label update), - the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
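
The preferred and required pod affinity stanzas above mirror the upstream Kubernetes PodAffinity API: preferred terms contribute a weighted score per node, required terms must all be satisfied at scheduling time. A minimal sketch exercising both, assuming this schema sits under the pgBackRest backup Job pod options (the `spec.backups.pgbackrest.jobs` path and all label values are hypothetical):

```yaml
spec:
  backups:
    pgbackrest:
      jobs:
        affinity:
          podAffinity:
            # Soft preference: each weight (1-100) is added to a node's score
            # when that node already runs pods matching the term.
            preferredDuringSchedulingIgnoredDuringExecution:
              - weight: 50
                podAffinityTerm:
                  labelSelector:
                    matchLabels:
                      app: cache
                  topologyKey: topology.kubernetes.io/zone
            # Hard requirement: all terms must hold at scheduling time.
            requiredDuringSchedulingIgnoredDuringExecution:
              - labelSelector:
                  matchExpressions:
                    - key: tier
                      operator: In
                      values: [backend]
                topologyKey: kubernetes.io/hostname  # required; empty is not allowed
```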
items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -765,60 +826,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -826,67 +909,59 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
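
To make the `matchLabelKeys` semantics above concrete: with the alpha `MatchLabelKeysInPodAffinity` feature gate enabled, each listed key is resolved against the incoming pod's labels and merged into `labelSelector` as `key in (value)`. A sketch, assuming the incoming pod carries a `pod-template-hash` label:

```yaml
affinity:
  podAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            app: web            # explicit selector, required when matchLabelKeys is set
        # Merged as `pod-template-hash in (<incoming pod's hash>)`, so the pod
        # only co-locates with pods from the same rollout.
        matchLabelKeys: ["pod-template-hash"]
        topologyKey: kubernetes.io/hostname
```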
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling @@ -894,20 +969,16 @@ spec: zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity - expressions specified by this field, but it - may choose a node that violates one or more - of the expressions. The node that is most preferred - is the one with the greatest sum of weights, - i.e. for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - anti-affinity expressions, etc.), compute a - sum by iterating through the elements of this - field and adding "weight" to the sum if the - node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest - sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -918,19 +989,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -938,66 +1008,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1005,76 +1091,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. 
A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -1082,41 +1153,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements - specified by this field are not met at scheduling - time, the pod will not be scheduled onto the - node. If the anti-affinity requirements specified - by this field cease to be met at some point - during pod execution (e.g. due to a pod label - update), the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
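
The anti-affinity fields follow the same shape as the affinity ones above, but repel instead of attract. A hedged example that asks the scheduler to keep these pods off nodes already running a PostgreSQL data pod (the label key is illustrative, not taken from this diff):

```yaml
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/data: postgres  # hypothetical label
          topologyKey: kubernetes.io/hostname
```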
items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1124,60 +1192,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1185,77 +1275,97 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
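
The `namespaces` and `namespaceSelector` fields described above combine as a union, and leaving both null or empty scopes the term to the pod's own namespace. One hypothetical `podAffinityTerm` showing both together:

```yaml
# The term applies to the union of: namespaces listed by name, plus any
# namespace whose labels match the selector. Names and labels are placeholders.
- labelSelector:
    matchLabels:
      app: web
  namespaces: [team-a]
  namespaceSelector:
    matchLabels:
      environment: production
  topologyKey: topology.kubernetes.io/zone
```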
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object priorityClassName: - description: 'Priority class name for the pgBackRest backup - Job pods. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgBackRest backup Job pods. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string resources: - description: Resource limits for backup jobs. Includes - manual, scheduled and replica create backups + description: |- + Resource limits for backup jobs. Includes manual, scheduled and replica + create backups properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
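
For the `resources` block above, a small sketch of requests and limits for the backup Job container; the quantities are placeholders and the `spec.backups.pgbackrest.jobs` path is an assumption based on the surrounding descriptions:

```yaml
spec:
  backups:
    pgbackrest:
      jobs:
        resources:
          requests:            # what the scheduler reserves
            cpu: 100m
            memory: 128Mi
          limits:              # hard cap; requests cannot exceed limits
            cpu: 500m
            memory: 256Mi
```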
It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -1263,8 +1373,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -1273,61 +1384,58 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object tolerations: - description: 'Tolerations of pgBackRest backup Job pods. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of pgBackRest backup Job pods. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to - tolerates any taint that matches the triple - using the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to - match. Empty means match all taint effects. When - specified, allowed values are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; - this combination means to match all values and - all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints - of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. 
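
The toleration fields being described here compose as follows; the taint keys are hypothetical, and `ttlSecondsAfterFinished` (described just below, minimum 60) is included to show where Job cleanup is configured alongside them:

```yaml
spec:
  backups:
    pgbackrest:
      jobs:
        tolerations:
          - key: dedicated
            operator: Equal          # the default; Exists would ignore `value`
            value: backups
            effect: NoSchedule
          - key: maintenance
            operator: Exists
            effect: NoExecute
            tolerationSeconds: 3600  # evicted an hour after the taint appears
        ttlSecondsAfterFinished: 600 # finished Jobs are garbage-collected after this
```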
+ Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect - NoExecute, otherwise this field is ignored) tolerates - the taint. By default, it is not set, which means - tolerate the taint forever (do not evict). Zero - and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value - should be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array ttlSecondsAfterFinished: - description: 'Limit the lifetime of a Job that has finished. - More info: https://kubernetes.io/docs/concepts/workloads/controllers/job' + description: |- + Limit the lifetime of a Job that has finished. + More info: https://kubernetes.io/docs/concepts/workloads/controllers/job format: int32 minimum: 60 type: integer @@ -1337,8 +1445,9 @@ spec: Jobs properties: options: - description: Command line options to include when running - the pgBackRest backup command. https://pgbackrest.org/command.html#command-backup + description: |- + Command line options to include when running the pgBackRest backup command. + https://pgbackrest.org/command.html#command-backup items: type: string type: array @@ -1363,40 +1472,36 @@ spec: type: object type: object repoHost: - description: Defines configuration for a pgBackRest dedicated - repository host. This section is only applicable if at - least one "volume" (i.e. PVC-based) repository is defined - in the "repos" section, therefore enabling a dedicated repository - host Deployment. + description: |- + Defines configuration for a pgBackRest dedicated repository host. This section is only + applicable if at least one "volume" (i.e. PVC-based) repository is defined in the "repos" + section, therefore enabling a dedicated repository host Deployment. properties: affinity: - description: 'Scheduling constraints of the Dedicated - repo host pod. Changing this value causes repo host - to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the Dedicated repo host pod. + Changing this value causes repo host to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. 
for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) - with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling - term matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -1406,85 +1511,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. 
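
One detail in the node selector description worth an example: `Gt` and `Lt` require exactly one value, which is interpreted as an integer. A hypothetical node-label requirement:

```yaml
# matchExpressions fragment; the label key is illustrative.
- key: example.com/cpu-count
  operator: Gt
  values: ["7"]   # single element, parsed as an integer
```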
properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in @@ -1496,112 +1588,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to an update), the system - may or may not try to eventually evict the pod - from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector - term matches no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
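
Putting the node affinity pieces together for the dedicated repository host: `nodeSelectorTerms` are ORed, while the `matchExpressions` inside a single term are ANDed. A sketch under the assumed `spec.backups.pgbackrest.repoHost.affinity` path, with illustrative labels:

```yaml
spec:
  backups:
    pgbackrest:
      repoHost:
        affinity:
          nodeAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
              nodeSelectorTerms:          # terms are ORed
                - matchExpressions:       # requirements in a term are ANDed
                    - key: kubernetes.io/arch
                      operator: In
                      values: [amd64, arm64]
                    - key: node-role.example.com/storage
                      operator: Exists
            preferredDuringSchedulingIgnoredDuringExecution:
              - weight: 10
                preference:
                  matchExpressions:
                    - key: topology.kubernetes.io/zone
                      operator: In
                      values: [us-east-1a]
```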
type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules @@ -1609,20 +1689,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. 
for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most - preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -1633,19 +1709,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1653,66 +1728,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1720,76 +1811,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. 
+ description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -1797,41 +1873,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to a pod label update), - the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. 
+ description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1839,60 +1912,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. 
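# Sketch of the required (hard) affinity described above: with two terms, the
# node lists for each term are intersected, so both must hold at scheduling
# time; label changes after placement may not evict the pod
# ("IgnoredDuringExecution"). Labels here are hypothetical.
requiredDuringSchedulingIgnoredDuringExecution:
  - topologyKey: topology.kubernetes.io/zone
    labelSelector:
      matchLabels:
        postgres-operator.crunchydata.com/cluster: hippo
  - topologyKey: kubernetes.io/hostname
    labelSelector:
      matchExpressions:
        - key: app.kubernetes.io/component
          operator: Exists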
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1900,67 +1995,59 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. 
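# Sketch of the four selector operators described above: In/NotIn require a
# non-empty values array, while Exists/DoesNotExist require values to be
# omitted. All expressions are ANDed. Keys are illustrative.
matchExpressions:
  - key: environment
    operator: In
    values: [production, staging]
  - key: tier
    operator: NotIn
    values: [frontend]
  - key: ssd
    operator: Exists
  - key: spot
    operator: DoesNotExist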
If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling @@ -1968,20 +2055,16 @@ spec: zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity - expressions specified by this field, but it - may choose a node that violates one or more - of the expressions. The node that is most preferred - is the one with the greatest sum of weights, - i.e. 
for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - anti-affinity expressions, etc.), compute a - sum by iterating through the elements of this - field and adding "weight" to the sum if the - node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest - sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -1992,19 +2075,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -2012,66 +2094,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
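# Sketch of the weighted anti-affinity just described: prefer, but do not
# require, placing the repo host away from other pods of the same cluster.
# The cluster label value "hippo" is hypothetical.
podAntiAffinity:
  preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 100
      podAffinityTerm:
        topologyKey: kubernetes.io/hostname
        labelSelector:
          matchLabels:
            postgres-operator.crunchydata.com/cluster: hippo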
The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -2079,76 +2177,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. 
+ description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -2156,41 +2239,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements - specified by this field are not met at scheduling - time, the pod will not be scheduled onto the - node. If the anti-affinity requirements specified - by this field cease to be met at some point - during pod execution (e.g. due to a pod label - update), the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. 
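# Sketch of the required anti-affinity form described above: unlike the
# weighted variant, the pod stays Pending when no node satisfies the term,
# and an empty topologyKey is rejected. The label is hypothetical.
podAntiAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    - topologyKey: kubernetes.io/hostname   # at most one matching pod per node
      labelSelector:
        matchLabels:
          postgres-operator.crunchydata.com/cluster: hippo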
+ description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -2198,60 +2278,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -2259,78 +2361,97 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. 
If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object priorityClassName: - description: 'Priority class name for the pgBackRest repo - host pod. Changing this value causes PostgreSQL to restart. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgBackRest repo host pod. Changing this value + causes PostgreSQL to restart. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string resources: description: Resource requirements for a pgBackRest repository host properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -2338,8 +2459,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -2348,29 +2470,27 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object sshConfigMap: - description: 'ConfigMap containing custom SSH configuration. - Deprecated: Repository hosts use mTLS for encryption, - authentication, and authorization.' + description: |- + ConfigMap containing custom SSH configuration. + Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If - a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked - optional. Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. 
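# Sketch combining priorityClassName and resources as described above.
# Quantities follow the pattern shown (plain integers, milli-units like
# "500m", or binary suffixes like "Mi"); requests default to limits when
# omitted and cannot exceed them. The PriorityClass name is hypothetical.
repoHost:
  priorityClassName: database-backups   # changing this restarts PostgreSQL
  resources:
    requests:
      cpu: 500m
      memory: 256Mi
    limits:
      cpu: "1"
      memory: 1Gi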
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -2379,22 +2499,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires decimal - values for mode bits. If not specified, the - volume defaultMode will be used. This might - be in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -2402,30 +2520,36 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic sshSecret: - description: 'Secret containing custom SSH keys. Deprecated: - Repository hosts use mTLS for encryption, authentication, - and authorization.' + description: |- + Secret containing custom SSH keys. + Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If - a key is specified which is not present in the Secret, - the volume setup will error unless it is marked - optional. Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. 
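# Sketch of the deprecated sshConfigMap projection described above: only the
# listed keys are projected, each to a relative path, optionally with octal
# mode bits. Repository hosts now use mTLS, so this only illustrates the item
# fields; names are hypothetical.
sshConfigMap:
  name: hippo-ssh-config
  optional: true
  items:
    - key: ssh_config
      path: ssh_config   # relative; may not contain '..'
      mode: 0600         # octal in YAML; JSON would need decimal 384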
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -2434,22 +2558,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires decimal - values for mode bits. If not specified, the - volume defaultMode will be used. This might - be in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -2457,208 +2579,236 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic tolerations: - description: 'Tolerations of a PgBackRest repo host pod. - Changing this value causes a restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of a PgBackRest repo host pod. Changing this value causes a restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to - tolerates any taint that matches the triple - using the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to - match. Empty means match all taint effects. When - specified, allowed values are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; - this combination means to match all values and - all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints - of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect - NoExecute, otherwise this field is ignored) tolerates - the taint. By default, it is not set, which means - tolerate the taint forever (do not evict). Zero - and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value - should be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array topologySpreadConstraints: - description: 'Topology spread constraints of a Dedicated - repo host pod. Changing this value causes the repo host - to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/' + description: |- + Topology spread constraints of a Dedicated repo host pod. Changing this + value causes the repo host to restart. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ items: description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. properties: labelSelector: - description: LabelSelector is used to find matching - pods. Pods that match this label selector are - counted to determine the number of pods in their - corresponding topology domain. + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
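# Sketch of tolerations per the fields described above: Exists needs no
# value, and tolerationSeconds (NoExecute only) bounds how long the pod
# remains on a tainted node before eviction. The first taint key is
# hypothetical.
tolerations:
  - key: dedicated-backups
    operator: Exists
    effect: NoSchedule
  - key: node.kubernetes.io/unreachable
    operator: Exists
    effect: NoExecute
    tolerationSeconds: 300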
properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which - pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between - the number of matching pods in the target topology - and the global minimum. The global minimum is - the minimum number of matching pods in an eligible - domain or zero if the number of eligible domains - is less than MinDomains. For example, in a 3-zone - cluster, MaxSkew is set to 1, and pods with the - same labelSelector spread as 2/2/1: In this case, - the global minimum is 1. | zone1 | zone2 | zone3 - | | P P | P P | P | - if MaxSkew is 1, - incoming pod can only be scheduled to zone3 to - become 2/2/2; scheduling it onto zone1(zone2) - would make the ActualSkew(3-1) on zone1(zone2) - violate MaxSkew(1). - if MaxSkew is 2, incoming - pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies - that satisfy it. 
It''s a required field. Default - value is 1 and 0 is not allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number - of eligible domains. When the number of eligible - domains with matching topology keys is less than - minDomains, Pod Topology Spread treats \"global - minimum\" as 0, and then the calculation of Skew - is performed. And when the number of eligible - domains with matching topology keys equals or - greater than minDomains, this value has no effect - on scheduling. As a result, when the number of - eligible domains is less than minDomains, scheduler - won't schedule more than maxSkew Pods to those - domains. If value is nil, the constraint behaves - as if MinDomains is equal to 1. Valid values are - integers greater than 0. When value is not nil, - WhenUnsatisfiable must be DoNotSchedule. \n For - example, in a 3-zone cluster, MaxSkew is set to - 2, MinDomains is set to 5 and pods with the same - labelSelector spread as 2/2/2: | zone1 | zone2 - | zone3 | | P P | P P | P P | The number - of domains is less than 5(MinDomains), so \"global - minimum\" is treated as 0. In this situation, - new pod with the same labelSelector cannot be - scheduled, because computed skew will be 3(3 - - 0) if new Pod is scheduled to any of the three - zones, it will violate MaxSkew. \n This is an - alpha field and requires enabling MinDomainsInPodTopologySpread - feature gate." + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. 
+ In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. format: int32 type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string topologyKey: - description: TopologyKey is the key of node labels. - Nodes that have a label with this key and identical - values are considered to be in the same topology. - We consider each as a "bucket", and - try to put balanced number of pods into each bucket. - We define a domain as a particular instance of - a topology. Also, we define an eligible domain - as a domain whose nodes match the node selector. - e.g. If TopologyKey is "kubernetes.io/hostname", - each Node is a domain of that topology. And, if - TopologyKey is "topology.kubernetes.io/zone", - each zone is a domain of that topology. It's a - required field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to - deal with a pod if it doesn''t satisfy the spread - constraint. - DoNotSchedule (default) tells the - scheduler not to schedule it. - ScheduleAnyway - tells the scheduler to schedule the pod in any - location, but giving higher precedence to topologies - that would help reduce the skew. A constraint - is considered "Unsatisfiable" for an incoming - pod if and only if every possible node assignment - for that pod would violate "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set - to 1, and pods with the same labelSelector spread - as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, - incoming pod can only be scheduled to zone2(zone3) - to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) - satisfies MaxSkew(1). 
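# Sketch tying together the spread fields above. With matching pods spread
# 2/2/1 across three zones and maxSkew=1, a new matching pod may only land in
# the third zone, since elsewhere the skew would become 3 - 1 = 2 > maxSkew.
# minDomains requires whenUnsatisfiable=DoNotSchedule. Label is hypothetical.
topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: topology.kubernetes.io/zone
    whenUnsatisfiable: DoNotSchedule
    minDomains: 3
    nodeTaintsPolicy: Honor
    labelSelector:
      matchLabels:
        postgres-operator.crunchydata.com/cluster: hippo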
In other words, the cluster - can still be imbalanced, but scheduler won''t - make it *more* imbalanced. It''s a required field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. type: string required: - maxSkew @@ -2695,12 +2845,13 @@ spec: - bucket type: object name: - description: The name of the the repository + description: The name of the repository pattern: ^repo[1-4] type: string s3: - description: RepoS3 represents a pgBackRest repository - that is created using AWS S3 (or S3-compatible) storage + description: |- + RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) + storage properties: bucket: description: The S3 bucket utilized for the repository @@ -2719,26 +2870,30 @@ spec: - region type: object schedules: - description: 'Defines the schedules for the pgBackRest - backups Full, Differential and Incremental backup - types are supported: https://pgbackrest.org/user-guide.html#concept/backup' + description: |- + Defines the schedules for the pgBackRest backups + Full, Differential and Incremental backup types are supported: + https://pgbackrest.org/user-guide.html#concept/backup properties: differential: - description: 'Defines the Cron schedule for a differential - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' + description: |- + Defines the Cron schedule for a differential pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax minLength: 6 type: string full: - description: 'Defines the Cron schedule for a full - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' + description: |- + Defines the Cron schedule for a full pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax minLength: 6 type: string incremental: - description: 'Defines the Cron schedule for an incremental - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' + description: |- + Defines the Cron schedule for an incremental pgBackRest backup. 
+ Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax minLength: 6 type: string type: object @@ -2751,32 +2906,29 @@ spec: used to create and/or bind a volume properties: accessModes: - description: 'accessModes contains the desired - access modes the volume should have. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string - minItems: 1 type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to - specify either: * An existing VolumeSnapshot - object (snapshot.storage.k8s.io/VolumeSnapshot) + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller - can support the specified data source, it - will create a new volume based on the contents - of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will always - have the same contents as the DataSourceRef - field.' + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the - resource being referenced. If APIGroup - is not specified, the specified Kind must - be in the core API group. For any other - third-party types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource @@ -2790,40 +2942,38 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object - from which to populate the volume with data, - if a non-empty volume is desired. This may - be any local object from a non-empty API group - (non core object) or a PersistentVolumeClaim - object. When this field is specified, volume - binding will only succeed if the type of the - specified object matches some installed volume - populator or dynamic provisioner. This field - will replace the functionality of the DataSource - field and as such if both fields are non-empty, - they must have the same value. For backwards - compatibility, both fields (DataSource and - DataSourceRef) will be set to the same value - automatically if one of them is empty and - the other is non-empty. There are two important - differences between DataSource and DataSourceRef: - * While DataSource only allows two specific - types of objects, DataSourceRef allows any - non-core object, as well as PersistentVolumeClaim - objects. 
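# [editor's note] Putting the repository and schedule fields above together, a
# hedged example of one pgBackRest S3 repository with all three backup types
# scheduled. The bucket, endpoint, region, and cron expressions are placeholders:
#
#   repos:
#     - name: repo1                    # must match ^repo[1-4]
#       s3:
#         bucket: my-bucket
#         endpoint: s3.us-east-1.amazonaws.com
#         region: us-east-1
#       schedules:
#         full: "0 1 * * 0"            # weekly full, Sunday 01:00
#         differential: "0 1 * * 1-6"  # differential the other six days
#         incremental: "0 */4 * * *"   # incremental every four hours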
* While DataSource ignores disallowed - values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed - value is specified. (Beta) Using this field - requires the AnyVolumeDataSource feature gate - to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the - resource being referenced. If APIGroup - is not specified, the specified Kind must - be in the core API group. For any other - third-party types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource @@ -2833,18 +2983,23 @@ spec: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name type: object resources: - description: 'resources represents the minimum - resources the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify - resource requirements that are lower than - previous value but must still be higher than - capacity recorded in the status field of the - claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. 
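# [editor's note] A sketch of the dataSourceRef behavior described above:
# pre-populating a claim from a VolumeSnapshot. The snapshot name and namespace
# are hypothetical; per the field description, setting namespace additionally
# requires the alpha CrossNamespaceVolumeDataSource feature gate and a
# ReferenceGrant in the referent namespace:
#
#   dataSourceRef:
#     apiGroup: snapshot.storage.k8s.io
#     kind: VolumeSnapshot
#     name: hippo-backup-snapshot
#     # namespace: other-team         # cross-namespace variant (alpha)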
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -2853,9 +3008,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum - amount of compute resources allowed. More - info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -2864,17 +3019,12 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. - If Requests is omitted for a container, - it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - required: - - storage + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object - required: - - requests type: object selector: description: selector is a label query over @@ -2885,68 +3035,82 @@ spec: of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of - the StorageClass required by the claim. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of - volume is required by the claim. Value of - Filesystem is implied when not included in - claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string - required: - - accessModes - - resources type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) + > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) required: - volumeClaimSpec type: object @@ -2963,32 +3127,29 @@ spec: using pgBackRest properties: affinity: - description: 'Scheduling constraints of the pgBackRest - restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. 
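# [editor's note] Note that the accessModes/resources requirements above moved
# from OpenAPI `required` lists into CEL rules under x-kubernetes-validations.
# A minimal claim spec that satisfies both rules; the storage class name is an
# assumption, and the enclosing `volume:` key is inferred from the required
# volumeClaimSpec shown above:
#
#   volume:
#     volumeClaimSpec:
#       accessModes: [ReadWriteOnce]  # has(self.accessModes) && size(self.accessModes) > 0
#       resources:
#         requests:
#           storage: 1Gi              # has(self.resources.requests.storage)
#       storageClassName: standard    # optional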
properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) - with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling - term matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -2998,85 +3159,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in @@ -3088,112 +3236,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to an update), the system - may or may not try to eventually evict the pod - from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector - term matches no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules @@ -3201,20 +3337,16 @@ spec: etc. as some other pod(s)). 
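# [editor's note] A hedged sketch combining the node affinity fields above: one
# hard requirement (nodeSelectorTerms are ORed, matchExpressions within a term
# are ANDed) plus one soft preference. Keys and values are illustrative:
#
#   affinity:
#     nodeAffinity:
#       requiredDuringSchedulingIgnoredDuringExecution:
#         nodeSelectorTerms:
#           - matchExpressions:
#               - key: kubernetes.io/arch
#                 operator: In
#                 values: [amd64, arm64]
#       preferredDuringSchedulingIgnoredDuringExecution:
#         - weight: 50
#           preference:
#             matchExpressions:
#               - key: topology.kubernetes.io/zone
#                 operator: In
#                 values: [us-east-1a]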
properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most - preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -3225,19 +3357,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3245,66 +3376,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. 
A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3312,76 +3459,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. 
Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
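# [editor's note] A minimal example of the weighted pod affinity term described
# above: prefer (weight 1-100) nodes already running pods that carry an
# illustrative label, with co-location defined per node via kubernetes.io/hostname:
#
#   podAffinity:
#     preferredDuringSchedulingIgnoredDuringExecution:
#       - weight: 100
#         podAffinityTerm:
#           labelSelector:
#             matchLabels:
#               app: cache             # illustrative label
#           topologyKey: kubernetes.io/hostname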
format: int32 type: integer required: @@ -3389,41 +3521,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to a pod label update), - the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3431,60 +3560,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3492,67 +3643,59 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
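# [editor's note] The required variant, also exercising the new matchLabelKeys
# and namespaceSelector fields described above. matchLabelKeys is alpha
# (MatchLabelKeysInPodAffinity feature gate); pod-template-hash is the usual
# illustrative key. Per the description, an empty selector ({}) matches all
# namespaces, while a nil selector and nil namespaces mean "this pod's namespace":
#
#   podAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#       - labelSelector:
#           matchExpressions:
#             - key: app
#               operator: In
#               values: [cache]
#         matchLabelKeys: [pod-template-hash]
#         namespaceSelector: {}
#         topologyKey: topology.kubernetes.io/zone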
type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling @@ -3560,20 +3703,16 @@ spec: zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity - expressions specified by this field, but it - may choose a node that violates one or more - of the expressions. The node that is most preferred - is the one with the greatest sum of weights, - i.e. for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - anti-affinity expressions, etc.), compute a - sum by iterating through the elements of this - field and adding "weight" to the sum if the - node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest - sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -3584,19 +3723,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3604,66 +3742,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3671,76 +3825,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. 
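# [editor's note] For pod anti-affinity the same shapes apply with the meaning
# inverted. A soft spread of one cluster's pods across nodes; the label key and
# the weight of 1 (the weakest possible preference) are assumptions:
#
#   podAntiAffinity:
#     preferredDuringSchedulingIgnoredDuringExecution:
#       - weight: 1
#         podAffinityTerm:
#           labelSelector:
#             matchLabels:
#               postgres-operator.crunchydata.com/cluster: hippo
#           topologyKey: kubernetes.io/hostname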
+ description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -3748,41 +3887,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements - specified by this field are not met at scheduling - time, the pod will not be scheduled onto the - node. If the anti-affinity requirements specified - by this field cease to be met at some point - during pod execution (e.g. due to a pod label - update), the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - <topologyKey> matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3790,60 +3926,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty.
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3851,80 +4009,70 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object clusterName: - description: The name of an existing PostgresCluster to - use as the data source for the new PostgresCluster. - Defaults to the name of the PostgresCluster being created - if not provided. + description: |- + The name of an existing PostgresCluster to use as the data source for the new PostgresCluster. + Defaults to the name of the PostgresCluster being created if not provided. type: string clusterNamespace: - description: The namespace of the cluster specified as - the data source using the clusterName field. Defaults - to the namespace of the PostgresCluster being created - if not provided. + description: |- + The namespace of the cluster specified as the data source using the clusterName field. + Defaults to the namespace of the PostgresCluster being created if not provided. type: string enabled: default: false @@ -3932,27 +4080,55 @@ spec: are enabled for this PostgresCluster. type: boolean options: - description: Command line options to include when running - the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore + description: |- + Command line options to include when running the pgBackRest restore command. + https://pgbackrest.org/command.html#command-restore items: type: string type: array priorityClassName: - description: 'Priority class name for the pgBackRest restore - Job pod. Changing this value causes PostgreSQL to restart. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgBackRest restore Job pod. Changing this + value causes PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string repoName: - description: The name of the pgBackRest repo within the - source PostgresCluster that contains the backups that - should be utilized to perform a pgBackRest restore when - initializing the data source for the new PostgresCluster. + description: |- + The name of the pgBackRest repo within the source PostgresCluster that contains the backups + that should be utilized to perform a pgBackRest restore when initializing the data source + for the new PostgresCluster. pattern: ^repo[1-4] type: string resources: description: Resource requirements for the pgBackRest restore Job. properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -3960,8 +4136,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -3970,55 +4147,51 @@ spec: anyOf: - type: integer - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object tolerations: - description: 'Tolerations of the pgBackRest restore Job. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to - tolerates any taint that matches the triple <key,value,effect> - using the matching operator <operator>. + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. properties: effect: - description: Effect indicates the taint effect to - match. Empty means match all taint effects. When - specified, allowed values are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; - this combination means to match all values and - all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints - of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect - NoExecute, otherwise this field is ignored) tolerates - the taint. By default, it is not set, which means - tolerate the taint forever (do not evict). Zero - and negative values will be treated as 0 (evict - immediately) by the system.
+ description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value - should be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -4036,6 +4209,32 @@ spec: resources: description: Resource requirements for a sidecar container properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -4043,8 +4242,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -4053,12 +4253,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object type: object @@ -4069,6 +4268,32 @@ spec: resources: description: Resource requirements for a sidecar container properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. 
+ items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -4076,8 +4301,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -4086,12 +4312,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object type: object @@ -4099,8 +4324,17 @@ spec: required: - repos type: object - required: - - pgbackrest + snapshots: + description: VolumeSnapshot configuration + properties: + volumeSnapshotClassName: + description: Name of the VolumeSnapshotClass that should be + used by VolumeSnapshots + minLength: 1 + type: string + required: + - volumeSnapshotClassName + type: object type: object config: properties: @@ -4109,21 +4343,111 @@ spec: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value pair - in the Data field of the referenced ConfigMap will - be projected into the volume as a file whose name - is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If a - key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' - path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -4132,22 +4456,20 @@ spec: description: key is the key to project. 
type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both octal - and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume - defaultMode will be used. This might be in conflict - with other options that affect the file mode, - like fsGroup, and the result can be other mode - bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -4155,14 +4477,22 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -4175,8 +4505,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of the - pod: only annotations, labels, name and namespace - are supported.' + pod: only annotations, labels, name, namespace + and uid are supported.' properties: apiVersion: description: Version of the schema the FieldPath @@ -4189,17 +4519,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used to set - permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal - values, JSON requires decimal values for mode - bits. If not specified, the volume defaultMode - will be used. This might be in conflict with - other options that affect the file mode, like - fsGroup, and the result can be other mode bits - set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
format: int32 type: integer path: @@ -4210,10 +4538,9 @@ spec: path must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: description: 'Container name: required for @@ -4233,26 +4560,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value pair - in the Data field of the referenced Secret will be - projected into the volume as a file whose name is - the key and content is the value. If specified, the - listed keys will be projected into the specified paths, - and unlisted keys will not be present. If a key is - specified which is not present in the Secret, the - volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' - path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -4261,22 +4588,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both octal - and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume - defaultMode will be used. This might be in conflict - with other options that affect the file mode, - like fsGroup, and the result can be other mode - bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -4284,39 +4609,47 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience of the - token. A recipient of a token must identify itself - with an identifier specified in the audience of the - token, and otherwise should reject the token. The - audience defaults to the identifier of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested duration - of validity of the service account token. As the token - approaches expiration, the kubelet volume plugin will - proactively rotate the service account token. The - kubelet will start trying to rotate the token if the - token is older than 80 percent of its time to live - or if the token is older than 24 hours.Defaults to - 1 hour and must be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the mount - point of the file to project the token into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -4325,23 +4658,23 @@ spec: type: array type: object customReplicationTLSSecret: - description: 'The secret containing the replication client certificates - and keys for secure connections to the PostgreSQL server. It will - need to contain the client TLS certificate, TLS key and the Certificate - Authority certificate with the data keys set to tls.crt, tls.key - and ca.crt, respectively. NOTE: If CustomReplicationClientTLSSecret - is provided, CustomTLSSecret MUST be provided and the ca.crt provided - must be the same.' + description: |- + The secret containing the replication client certificates and keys for + secure connections to the PostgreSQL server. It will need to contain the + client TLS certificate, TLS key and the Certificate Authority certificate + with the data keys set to tls.crt, tls.key and ca.crt, respectively. + NOTE: If CustomReplicationClientTLSSecret is provided, CustomTLSSecret + MUST be provided and the ca.crt provided must be the same. 
properties: items: - description: items if unspecified, each key-value pair in the - Data field of the referenced Secret will be projected into the - volume as a file whose name is the key and content is the value. - If specified, the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If a key is specified - which is not present in the Secret, the volume setup will error - unless it is marked optional. Paths must be relative and may - not contain the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. properties: @@ -4349,54 +4682,64 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used to set permissions - on this file. Must be an octal value between 0000 and - 0777 or a decimal value between 0 and 511. YAML accepts - both octal and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume defaultMode - will be used. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the file to map - the key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic customTLSSecret: - description: 'The secret containing the Certificates and Keys to encrypt - PostgreSQL traffic will need to contain the server TLS certificate, - TLS key and the Certificate Authority certificate with the data - keys set to tls.crt, tls.key and ca.crt, respectively. 
It will then - be mounted as a volume projection to the ''/pgconf/tls'' directory. - For more information on Kubernetes secret projections, please see + description: |- + The secret containing the Certificates and Keys to encrypt PostgreSQL + traffic will need to contain the server TLS certificate, TLS key and the + Certificate Authority certificate with the data keys set to tls.crt, + tls.key and ca.crt, respectively. It will then be mounted as a volume + projection to the '/pgconf/tls' directory. For more information on + Kubernetes secret projections, please see https://k8s.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths NOTE: If CustomTLSSecret is provided, CustomReplicationClientTLSSecret - MUST be provided and the ca.crt provided must be the same.' + MUST be provided and the ca.crt provided must be the same. properties: items: - description: items if unspecified, each key-value pair in the - Data field of the referenced Secret will be projected into the - volume as a file whose name is the key and content is the value. - If specified, the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If a key is specified - which is not present in the Secret, the volume setup will error - unless it is marked optional. Paths must be relative and may - not contain the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. properties: @@ -4404,72 +4747,78 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used to set permissions - on this file. Must be an octal value between 0000 and - 0777 or a decimal value between 0 and 511. YAML accepts - both octal and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume defaultMode - will be used. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the file to map - the key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic dataSource: description: Specifies a data source for bootstrapping the PostgreSQL cluster. properties: pgbackrest: - description: 'Defines a pgBackRest cloud-based data source that - can be used to pre-populate the the PostgreSQL data directory - for a new PostgreSQL cluster using a pgBackRest restore. The - PGBackRest field is incompatible with the PostgresCluster field: - only one data source can be used for pre-populating a new PostgreSQL - cluster' + description: |- + Defines a pgBackRest cloud-based data source that can be used to pre-populate the + PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. + The PGBackRest field is incompatible with the PostgresCluster field: only one + data source can be used for pre-populating a new PostgreSQL cluster properties: affinity: - description: 'Scheduling constraints of the pgBackRest restore - Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 (i.e. - it's a no-op). A null preferred scheduling term - matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). 
properties: preference: description: A node selector term, associated @@ -4479,79 +4828,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -4563,105 +4905,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to an update), the system may or may not try - to eventually evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -4669,19 +5006,16 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -4692,18 +5026,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -4711,60 +5045,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -4772,70 +5128,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". 
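# A sketch of the new matchLabelKeys field, assuming the alpha
# MatchLabelKeysInPodAffinity feature gate is enabled as noted above.
# pod-template-hash is the usual key for scoping anti-affinity to a single
# rollout; the app label is hypothetical.
podAntiAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    - labelSelector:
        matchLabels:
          app: web                      # hypothetical
      matchLabelKeys:
        - pod-template-hash             # merged into labelSelector as `key in (value)`
      topologyKey: kubernetes.io/hostname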
+ description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -4843,161 +5190,179 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to a pod label update), the system may or may - not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. 
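# A sketch of the weighted (preferred) form described above: each entry pairs a
# weight in 1-100 with a podAffinityTerm, and the scheduler sums weights per
# node. The label and the choice of zone topology key are placeholders.
podAffinity:
  preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 50                        # range 1-100
      podAffinityTerm:
        labelSelector:
          matchLabels:
            app.kubernetes.io/name: cache   # hypothetical
        topologyKey: topology.kubernetes.io/zone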
+ If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". 
+ description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules @@ -5005,19 +5370,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to - the sum if the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -5028,18 +5390,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
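# A sketch of the hard (requiredDuringScheduling...) form: with multiple terms,
# the per-term node lists are intersected, so a candidate node must satisfy
# every term. Labels here are hypothetical.
podAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    - labelSelector:
        matchLabels: {app: api}
      topologyKey: kubernetes.io/hostname
    - labelSelector:
        matchExpressions:
          - {key: tier, operator: In, values: [backend]}
      topologyKey: topology.kubernetes.io/zone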
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -5047,60 +5409,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object - namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. - properties: + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -5108,70 +5492,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
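# A sketch of the namespace scoping described above: a term applies to the
# union of the static `namespaces` list and the namespaces matched by
# `namespaceSelector`; leaving both null means "this pod's namespace", while an
# empty selector ({}) matches all namespaces. Names and labels are hypothetical.
- labelSelector:
    matchLabels: {app: metrics}
  namespaces: [monitoring]
  namespaceSelector:
    matchLabels:
      team: observability
  topologyKey: kubernetes.io/hostname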
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -5179,188 +5554,297 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - anti-affinity requirements specified by this field - cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may - or may not try to eventually evict the pod from - its node. When there are multiple elements, the - lists of nodes corresponding to each podAffinityTerm - are intersected, i.e. all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
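# A sketch of the common anti-affinity use: spreading one cluster's pods across
# nodes via the hostname topology key. The cluster label key follows this
# operator's naming convention but is an assumption for the example.
podAntiAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    - labelSelector:
        matchLabels:
          postgres-operator.crunchydata.com/cluster: hippo   # hypothetical
      topologyKey: kubernetes.io/hostname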
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object configuration: - description: 'Projected volumes containing custom pgBackRest - configuration. These files are mounted under "/etc/pgbackrest/conf.d" - alongside any pgBackRest configuration generated by the - PostgreSQL Operator: https://pgbackrest.org/configuration.html' + description: |- + Projected volumes containing custom pgBackRest configuration. These files are mounted + under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the + PostgreSQL Operator: + https://pgbackrest.org/configuration.html items: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the ConfigMap, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -5369,39 +5853,43 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. Must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON - requires decimal values for mode bits. If - not specified, the volume defaultMode will - be used. This might be in conflict with - other options that affect the file mode, - like fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. 
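# A sketch of the `configuration` field described above: a list of projected
# volume sources whose files are mounted under /etc/pgbackrest/conf.d. The
# parent field path and the object names are assumptions.
spec:
  backups:
    pgbackrest:
      configuration:
        - configMap:
            name: hippo-pgbackrest-extra   # hypothetical
            items:
              - key: extra.conf
                path: extra.conf
        - secret:
            name: hippo-s3-creds           # hypothetical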
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of - the file to map the key to. May not be an - absolute path. May not contain the path - element '..'. May not start with the string - '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -5416,8 +5904,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' + the pod: only annotations, labels, name, + namespace and uid are supported.' properties: apiVersion: description: Version of the schema the @@ -5431,17 +5919,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used to - set permissions on this file, must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -5452,10 +5938,9 @@ spec: the relative path must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
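# A sketch of a downwardAPI projection per the schema above: fieldRef exposes
# pod metadata (annotations, labels, name, namespace, uid), and
# resourceFieldRef exposes container resources. The container name is
# hypothetical; containerName is required when used in a volume.
- downwardAPI:
    items:
      - path: labels
        fieldRef:
          fieldPath: metadata.labels
      - path: cpu-limit
        resourceFieldRef:
          containerName: database          # hypothetical
          resource: limits.cpu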
properties: containerName: description: 'Container name: required @@ -5476,27 +5961,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the Secret, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -5505,66 +5989,68 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. Must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON - requires decimal values for mode bits. If - not specified, the volume defaultMode will - be used. This might be in conflict with - other options that affect the file mode, - like fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of - the file to map the key to. May not be an - absolute path. May not contain the path - element '..'. May not start with the string - '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience of - the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account token. - As the token approaches expiration, the kubelet - volume plugin will proactively rotate the service - account token. The kubelet will start trying to - rotate the token if the token is older than 80 - percent of its time to live or if the token is - older than 24 hours.Defaults to 1 hour and must - be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the mount - point of the file to project the token into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -5574,21 +6060,24 @@ spec: global: additionalProperties: type: string - description: 'Global pgBackRest configuration settings. These - settings are included in the "global" section of the pgBackRest - configuration generated by the PostgreSQL Operator, and - then mounted under "/etc/pgbackrest/conf.d": https://pgbackrest.org/configuration.html' + description: |- + Global pgBackRest configuration settings. These settings are included in the "global" + section of the pgBackRest configuration generated by the PostgreSQL Operator, and then + mounted under "/etc/pgbackrest/conf.d": + https://pgbackrest.org/configuration.html type: object options: - description: Command line options to include when running - the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore + description: |- + Command line options to include when running the pgBackRest restore command. + https://pgbackrest.org/command.html#command-restore items: type: string type: array priorityClassName: - description: 'Priority class name for the pgBackRest restore - Job pod. Changing this value causes PostgreSQL to restart. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgBackRest restore Job pod. Changing this + value causes PostgreSQL to restart. 
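# A sketch of the `global` and `options` fields just described; the values are
# plausible pgBackRest settings for a point-in-time restore, shown only as
# placeholders, not prescribed configuration.
global:
  repo1-path: /pgbackrest/hippo/repo1      # hypothetical
options:
  - --type=time
  - --target=2024-06-01 00:00:00+00        # hypothetical recovery target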
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string repo: description: Defines a pgBackRest repository @@ -5615,12 +6104,13 @@ spec: - bucket type: object name: - description: The name of the the repository + description: The name of the repository pattern: ^repo[1-4] type: string s3: - description: RepoS3 represents a pgBackRest repository - that is created using AWS S3 (or S3-compatible) storage + description: |- + RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) + storage properties: bucket: description: The S3 bucket utilized for the repository @@ -5638,26 +6128,30 @@ spec: - region type: object schedules: - description: 'Defines the schedules for the pgBackRest - backups Full, Differential and Incremental backup types - are supported: https://pgbackrest.org/user-guide.html#concept/backup' + description: |- + Defines the schedules for the pgBackRest backups + Full, Differential and Incremental backup types are supported: + https://pgbackrest.org/user-guide.html#concept/backup properties: differential: - description: 'Defines the Cron schedule for a differential - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' + description: |- + Defines the Cron schedule for a differential pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax minLength: 6 type: string full: - description: 'Defines the Cron schedule for a full - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' + description: |- + Defines the Cron schedule for a full pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax minLength: 6 type: string incremental: - description: 'Defines the Cron schedule for an incremental - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' + description: |- + Defines the Cron schedule for an incremental pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax minLength: 6 type: string type: object @@ -5670,30 +6164,29 @@ spec: used to create and/or bind a volume properties: accessModes: - description: 'accessModes contains the desired - access modes the volume should have. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to - specify either: * An existing VolumeSnapshot - object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If - the provisioner or an external controller can - support the specified data source, it will create - a new volume based on the contents of the specified - data source. If the AnyVolumeDataSource feature - gate is enabled, this field will always have - the same contents as the DataSourceRef field.' 
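# A sketch of a repository definition with backup schedules, assuming the usual
# spec.backups.pgbackrest.repos placement; the bucket, endpoint, and cron
# values are placeholders using the standard cron syntax referenced above.
repos:
  - name: repo1                       # must match ^repo[1-4]
    s3:
      bucket: demo-bucket
      endpoint: s3.us-east-1.amazonaws.com
      region: us-east-1
    schedules:
      full: "0 1 * * 0"               # weekly full backup
      differential: "0 1 * * 1-6"     # differential on the remaining days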
+ description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the - resource being referenced. If APIGroup is - not specified, the specified Kind must be - in the core API group. For any other third-party - types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource @@ -5707,39 +6200,38 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object - from which to populate the volume with data, - if a non-empty volume is desired. This may be - any local object from a non-empty API group - (non core object) or a PersistentVolumeClaim - object. When this field is specified, volume - binding will only succeed if the type of the - specified object matches some installed volume - populator or dynamic provisioner. This field - will replace the functionality of the DataSource - field and as such if both fields are non-empty, - they must have the same value. For backwards - compatibility, both fields (DataSource and DataSourceRef) - will be set to the same value automatically - if one of them is empty and the other is non-empty. - There are two important differences between - DataSource and DataSourceRef: * While DataSource - only allows two specific types of objects, DataSourceRef - allows any non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed - values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed - value is specified. (Beta) Using this field - requires the AnyVolumeDataSource feature gate - to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. 
+ There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the - resource being referenced. If APIGroup is - not specified, the specified Kind must be - in the core API group. For any other third-party - types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource @@ -5749,18 +6241,23 @@ spec: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name type: object resources: - description: 'resources represents the minimum - resources the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify - resource requirements that are lower than previous - value but must still be higher than capacity - recorded in the status field of the claim. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -5769,9 +6266,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum - amount of compute resources allowed. More - info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -5780,12 +6277,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. 
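A minimal PersistentVolumeClaim sketch using dataSourceRef as described above, assuming a hypothetical VolumeSnapshot named nightly-snapshot; the commented namespace field is the alpha cross-namespace case, which additionally requires a ReferenceGrant in the referent namespace:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: restored-data                  # hypothetical claim name
    spec:
      accessModes: [ReadWriteOnce]
      resources:
        requests:
          storage: 10Gi
      dataSourceRef:
        apiGroup: snapshot.storage.k8s.io  # outside the core API group, so required
        kind: VolumeSnapshot
        name: nightly-snapshot             # hypothetical snapshot
        # namespace: other-team            # alpha: needs CrossNamespaceVolumeDataSource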
If - Requests is omitted for a container, it - defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object selector: @@ -5797,63 +6293,82 @@ spec: label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of - the StorageClass required by the claim. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of volume - is required by the claim. Value of Filesystem - is implied when not included in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) + > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) required: - volumeClaimSpec type: object @@ -5864,6 +6379,31 @@ spec: description: Resource requirements for the pgBackRest restore Job. properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -5871,8 +6411,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -5881,59 +6422,57 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. 
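The two x-kubernetes-validations rules added above are CEL expressions evaluated against the volumeClaimSpec itself (`self`); a claim spec passes both when it names at least one access mode and a storage request, as in this sketch (the StorageClass name is hypothetical):

    volume:
      volumeClaimSpec:
        accessModes:
        - ReadWriteOnce              # passes: has(self.accessModes) && size(self.accessModes) > 0
        storageClassName: standard   # hypothetical StorageClass
        resources:
          requests:
            storage: 5Gi             # passes: has(self.resources.requests.storage)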
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object stanza: default: db - description: The name of an existing pgBackRest stanza to - use as the data source for the new PostgresCluster. Defaults - to `db` if not provided. + description: |- + The name of an existing pgBackRest stanza to use as the data source for the new PostgresCluster. + Defaults to `db` if not provided. type: string tolerations: - description: 'Tolerations of the pgBackRest restore Job. More - info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value should - be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. 
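Returning to the restore Job's resources block earlier in this hunk, a sketch with hypothetical values; requests default to the limits when omitted and may not exceed them:

    resources:
      requests:
        cpu: 500m            # defaults to the limit when omitted and a limit is set
        memory: 1Gi
      limits:
        cpu: "1"             # requests cannot exceed limits
        memory: 2Gi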
+ If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -5942,38 +6481,36 @@ spec: - stanza type: object postgresCluster: - description: 'Defines a pgBackRest data source that can be used - to pre-populate the PostgreSQL data directory for a new PostgreSQL - cluster using a pgBackRest restore. The PGBackRest field is - incompatible with the PostgresCluster field: only one data source - can be used for pre-populating a new PostgreSQL cluster' + description: |- + Defines a pgBackRest data source that can be used to pre-populate the PostgreSQL data + directory for a new PostgreSQL cluster using a pgBackRest restore. + The PGBackRest field is incompatible with the PostgresCluster field: only one + data source can be used for pre-populating a new PostgreSQL cluster properties: affinity: - description: 'Scheduling constraints of the pgBackRest restore - Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 (i.e. - it's a no-op). A null preferred scheduling term - matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -5983,79 +6520,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. 
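The toleration fields above translate into entries like these on the restore Job; the dedicated taint is hypothetical:

    tolerations:
    - key: dedicated                     # hypothetical taint key
      operator: Equal
      value: backups
      effect: NoSchedule
    - key: node.kubernetes.io/not-ready
      operator: Exists                   # Exists matches any value for the key
      effect: NoExecute
      tolerationSeconds: 300             # evict after five minutes instead of never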
type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -6067,105 +6597,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to an update), the system may or may not try - to eventually evict the pod from its node. 
+ description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. 
- If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -6173,19 +6698,16 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -6196,18 +6718,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -6215,60 +6737,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. 
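As a sketch of the nodeAffinity schema just above, assuming hypothetical zone labels: required nodeSelectorTerms are ORed (the matchExpressions within each term ANDed), while preferred terms contribute their weight when matched:

    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:                 # terms are ORed
          - matchExpressions:                # requirements are ANDed
            - key: kubernetes.io/os
              operator: In
              values: [linux]
        preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 50                         # in the range 1-100
          preference:
            matchExpressions:
            - key: topology.kubernetes.io/zone
              operator: In
              values: [us-east-1a]           # hypothetical zone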
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. 
+ description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -6276,70 +6820,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -6347,161 +6882,179 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to a pod label update), the system may or may - not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
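A sketch of how the new matchLabelKeys field composes with labelSelector in a podAffinityTerm, assuming a hypothetical app label; the value for each listed key is read from the incoming pod's labels and merged into the selector as `key in (value)`:

    podAffinityTerm:
      labelSelector:
        matchLabels:
          app: web                        # hypothetical label
      matchLabelKeys:
      - pod-template-hash                 # scope matching to pods from the same ReplicaSet
      topologyKey: kubernetes.io/hostname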
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules @@ -6509,19 +7062,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to - the sum if the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -6532,18 +7082,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
properties: key: description: key is the label @@ -6551,60 +7101,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. 
- The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -6612,70 +7184,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
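A required podAntiAffinity sketch in the shape of this schema, spreading pods of one hypothetical cluster across nodes; topologyKey defines what "co-located" means:

    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/cluster: hippo   # hypothetical cluster label
          topologyKey: kubernetes.io/hostname                    # at most one matching pod per node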
items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -6683,196 +7246,240 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - anti-affinity requirements specified by this field - cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may - or may not try to eventually evict the pod from - its node. When there are multiple elements, the - lists of nodes corresponding to each podAffinityTerm - are intersected, i.e. all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object clusterName: - description: The name of an existing PostgresCluster to use - as the data source for the new PostgresCluster. Defaults - to the name of the PostgresCluster being created if not - provided. + description: |- + The name of an existing PostgresCluster to use as the data source for the new PostgresCluster. + Defaults to the name of the PostgresCluster being created if not provided. type: string clusterNamespace: - description: The namespace of the cluster specified as the - data source using the clusterName field. Defaults to the - namespace of the PostgresCluster being created if not provided. + description: |- + The namespace of the cluster specified as the data source using the clusterName field. + Defaults to the namespace of the PostgresCluster being created if not provided. type: string options: - description: Command line options to include when running - the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore + description: |- + Command line options to include when running the pgBackRest restore command. + https://pgbackrest.org/command.html#command-restore items: type: string type: array priorityClassName: - description: 'Priority class name for the pgBackRest restore - Job pod. Changing this value causes PostgreSQL to restart. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgBackRest restore Job pod. Changing this + value causes PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string repoName: - description: The name of the pgBackRest repo within the source - PostgresCluster that contains the backups that should be - utilized to perform a pgBackRest restore when initializing - the data source for the new PostgresCluster. + description: |- + The name of the pgBackRest repo within the source PostgresCluster that contains the backups + that should be utilized to perform a pgBackRest restore when initializing the data source + for the new PostgresCluster. pattern: ^repo[1-4] type: string resources: description: Resource requirements for the pgBackRest restore Job. properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -6880,8 +7487,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -6890,53 +7498,51 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object tolerations: - description: 'Tolerations of the pgBackRest restore Job. More - info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. 
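The clusterName, clusterNamespace, options, repoName, resources, and tolerations fields in this hunk all configure the pgBackRest restore data source. A minimal sketch, assuming the usual spec.dataSource.postgresCluster placement in this CRD; the cluster name, namespace, taint key, and restore target are invented:

spec:
  dataSource:
    postgresCluster:
      clusterName: hippo                   # invented source cluster
      clusterNamespace: postgres-operator  # defaults to the new cluster's namespace
      repoName: repo1                      # must match ^repo[1-4]
      options:
        - --type=time
        - '--target=2024-01-01 00:00:00+00'
      resources:
        requests:
          cpu: 500m
          memory: 512Mi
      tolerations:
        - key: restore-workload            # invented taint key
          operator: Exists
          effect: NoSchedule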
type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value should - be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -6947,12 +7553,14 @@ spec: description: Defines any existing volumes to reuse for this PostgresCluster. properties: pgBackRestVolume: - description: Defines the existing pgBackRest repo volume and - directory to use in the current PostgresCluster. + description: |- + Defines the existing pgBackRest repo volume and directory to use in the + current PostgresCluster. properties: directory: - description: The existing directory. When not set, a move - Job is not created for the associated volume. + description: |- + The existing directory. When not set, a move Job is not created for the + associated volume. type: string pvcName: description: The existing PVC name. @@ -6961,12 +7569,14 @@ spec: - pvcName type: object pgDataVolume: - description: Defines the existing pgData volume and directory - to use in the current PostgresCluster. + description: |- + Defines the existing pgData volume and directory to use in the current + PostgresCluster. properties: directory: - description: The existing directory. When not set, a move - Job is not created for the associated volume. + description: |- + The existing directory. When not set, a move Job is not created for the + associated volume. type: string pvcName: description: The existing PVC name. @@ -6975,13 +7585,15 @@ spec: - pvcName type: object pgWALVolume: - description: Defines the existing pg_wal volume and directory - to use in the current PostgresCluster. Note that a defined - pg_wal volume MUST be accompanied by a pgData volume. + description: |- + Defines the existing pg_wal volume and directory to use in the current + PostgresCluster. Note that a defined pg_wal volume MUST be accompanied by + a pgData volume. properties: directory: - description: The existing directory. When not set, a move - Job is not created for the associated volume. + description: |- + The existing directory. When not set, a move Job is not created for the + associated volume. 
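The volumes fields above let a new cluster adopt existing PVCs. Per the schema, pgWALVolume is only valid alongside pgDataVolume, and omitting directory skips the move Job for that volume. A sketch, assuming the spec.dataSource.volumes placement; PVC names and directories are invented:

spec:
  dataSource:
    volumes:
      pgDataVolume:
        pvcName: old-hippo-pgdata   # existing PVC, invented name
        directory: old-hippo-db     # a move Job relocates this directory
      pgWALVolume:                  # requires pgDataVolume to be set as well
        pvcName: old-hippo-wal
        directory: old-hippo-wal
      pgBackRestVolume:
        pvcName: old-hippo-repo1
        # no directory: no move Job is created for this volume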
type: string pvcName: description: The existing PVC name. @@ -6992,9 +7604,10 @@ spec: type: object type: object databaseInitSQL: - description: DatabaseInitSQL defines a ConfigMap containing custom - SQL that will be run after the cluster is initialized. This ConfigMap - must be in the same namespace as the cluster. + description: |- + DatabaseInitSQL defines a ConfigMap containing custom SQL that will + be run after the cluster is initialized. This ConfigMap must be in the same + namespace as the cluster. properties: key: description: Key is the ConfigMap data key that points to a SQL @@ -7008,69 +7621,84 @@ spec: - name type: object disableDefaultPodScheduling: - description: Whether or not the PostgreSQL cluster should use the - defined default scheduling constraints. If the field is unset or - false, the default scheduling constraints will be used in addition - to any custom constraints provided. + description: |- + Whether or not the PostgreSQL cluster should use the defined default + scheduling constraints. If the field is unset or false, the default + scheduling constraints will be used in addition to any custom constraints + provided. type: boolean image: - description: The image name to use for PostgreSQL containers. When - omitted, the value comes from an operator environment variable. - For standard PostgreSQL images, the format is RELATED_IMAGE_POSTGRES_{postgresVersion}, + description: |- + The image name to use for PostgreSQL containers. When omitted, the value + comes from an operator environment variable. For standard PostgreSQL images, + the format is RELATED_IMAGE_POSTGRES_{postgresVersion}, e.g. RELATED_IMAGE_POSTGRES_13. For PostGIS enabled PostgreSQL images, the format is RELATED_IMAGE_POSTGRES_{postgresVersion}_GIS_{postGISVersion}, e.g. RELATED_IMAGE_POSTGRES_13_GIS_3.1. type: string imagePullPolicy: - description: 'ImagePullPolicy is used to determine when Kubernetes - will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy' + description: |- + ImagePullPolicy is used to determine when Kubernetes will attempt to + pull (download) container images. + More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy enum: - Always - Never - IfNotPresent type: string imagePullSecrets: - description: The image pull secrets used to pull from a private registry - Changing this value causes all running pods to restart. https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + description: |- + The image pull secrets used to pull from a private registry + Changing this value causes all running pods to restart. + https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
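databaseInitSQL, shown above, runs custom SQL from a ConfigMap after the cluster initializes, and the ConfigMap must live in the cluster's namespace. A minimal sketch with invented names; the second document is a spec fragment, not a complete manifest:

apiVersion: v1
kind: ConfigMap
metadata:
  name: hippo-init-sql    # must be in the PostgresCluster's namespace
data:
  init.sql: |
    CREATE SCHEMA IF NOT EXISTS app;
---
# Referenced from the PostgresCluster spec:
spec:
  databaseInitSQL:
    name: hippo-init-sql
    key: init.sql         # the data key holding the SQL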
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string type: object + x-kubernetes-map-type: atomic type: array instances: - description: Specifies one or more sets of PostgreSQL pods that replicate - data for this cluster. + description: |- + Specifies one or more sets of PostgreSQL pods that replicate data for + this cluster. items: properties: affinity: - description: 'Scheduling constraints of a PostgreSQL pod. Changing - this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of a PostgreSQL pod. Changing this value causes + PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods - to nodes that satisfy the affinity expressions specified - by this field, but it may choose a node that violates - one or more of the expressions. The node that is most - preferred is the one with the greatest sum of weights, - i.e. for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating - through the elements of this field and adding "weight" - to the sum if the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a - no-op). A null preferred scheduling term matches - no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -7080,79 +7708,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. 
- If the operator is In or NotIn, the - values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. If - the operator is Gt or Lt, the values - array must have a single element, - which will be interpreted as an integer. - This array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, the - values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. If - the operator is Gt or Lt, the values - array must have a single element, - which will be interpreted as an integer. - This array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the range @@ -7164,105 +7785,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the affinity - requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an - update), the system may or may not try to eventually - evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. 
properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, the - values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. If - the operator is Gt or Lt, the values - array must have a single element, - which will be interpreted as an integer. - This array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, the - values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. If - the operator is Gt or Lt, the values - array must have a single element, - which will be interpreted as an integer. - This array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -7270,18 +7886,16 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods - to nodes that satisfy the affinity expressions specified - by this field, but it may choose a node that violates - one or more of the expressions. The node that is most - preferred is the one with the greatest sum of weights, - i.e. for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating - through the elements of this field and adding "weight" - to the sum if the node has pods which matches the - corresponding podAffinityTerm; the node(s) with the - highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -7292,144 +7906,161 @@ spec: with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. 
This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of - namespaces that the term applies to. The - term is applied to the union of the namespaces - selected by this field and the ones listed - in the namespaces field. null selector and - null or empty namespaces list means "this - pod's namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. 
The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term applies - to. The term is applied to the union of - the namespaces listed in this field and - the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose value - of the label with key topologyKey matches - that of any node on which any of the selected - pods is running. Empty topologyKey is not - allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
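Each entry of spec.instances carries its own affinity block with the node and pod (anti-)affinity terms described in this hunk. A sketch that prefers spreading instance pods across nodes; the instance name, replicas field, and selector label are assumptions drawn from the instance-set schema rather than this hunk, and weight must fall in the documented 1-100 range:

spec:
  instances:
    - name: instance1
      replicas: 2
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    postgres-operator.crunchydata.com/instance-set: instance1  # hypothetical label
                topologyKey: kubernetes.io/hostname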
type: string required: - topologyKey type: object weight: - description: weight associated with matching the - corresponding podAffinityTerm, in the range - 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -7437,158 +8068,179 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the affinity - requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a - pod label update), the system may or may not try to - eventually evict the pod from its node. When there - are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all - terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or - not co-located (anti-affinity) with, where co-located - is defined as running on a node whose value of the - label with key matches that of any - node on which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. 
This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. 
The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified - namespaces, where co-located is defined as running - on a node whose value of the label with key - topologyKey matches that of any node on which - any of the selected pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules @@ -7596,18 +8248,16 @@ spec: as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods - to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the greatest - sum of weights, i.e. for each node that meets all - of the scheduling requirements (resource request, - requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if the - node has pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -7618,144 +8268,161 @@ spec: with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of - namespaces that the term applies to. The - term is applied to the union of the namespaces - selected by this field and the ones listed - in the namespaces field. null selector and - null or empty namespaces list means "this - pod's namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term applies - to. The term is applied to the union of - the namespaces listed in this field and - the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose value - of the label with key topologyKey matches - that of any node on which any of the selected - pods is running. Empty topologyKey is not - allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching the - corresponding podAffinityTerm, in the range - 1-100. 
+ description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -7763,199 +8430,221 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the anti-affinity - requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a - pod label update), the system may or may not try to - eventually evict the pod from its node. When there - are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all - terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or - not co-located (anti-affinity) with, where co-located - is defined as running on a node whose value of the - label with key matches that of any - node on which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified - namespaces, where co-located is defined as running - on a node whose value of the label with key - topologyKey matches that of any node on which - any of the selected pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
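Because matchExpressions requirements are ANDed, set-based operators can be combined in a single selector. A minimal sketch with illustrative key names; per the schema above, In and NotIn need a non-empty values array while Exists and DoesNotExist must leave it unset:

    labelSelector:
      matchExpressions:
        - key: postgres-operator.crunchydata.com/instance-set  # assumed label key
          operator: In
          values: [instance1, instance2]
        - key: app.kubernetes.io/component
          operator: Exists  # no values allowed with Exists/DoesNotExist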
type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object containers: - description: Custom sidecars for PostgreSQL instance pods. Changing - this value causes PostgreSQL to restart. + description: |- + Custom sidecars for PostgreSQL instance pods. Changing this value causes + PostgreSQL to restart. items: description: A single application container that you want to run within a pod. properties: args: - description: 'Arguments to the entrypoint. The container - image''s CMD is used if this is not provided. Variable - references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. Double $$ are - reduced to a single $, which allows for escaping the - $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce - the string literal "$(VAR_NAME)". Escaped references - will never be expanded, regardless of whether the variable - exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array + x-kubernetes-list-type: atomic command: - description: 'Entrypoint array. Not executed within a - shell. The container image''s ENTRYPOINT is used if - this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If - a variable cannot be resolved, the reference in the - input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) - syntax: i.e. "$$(VAR_NAME)" will produce the string - literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array + x-kubernetes-list-type: atomic env: - description: List of environment variables to set in the - container. Cannot be updated. 
+ description: |- + List of environment variables to set in the container. + Cannot be updated. items: description: EnvVar represents an environment variable present in a Container. @@ -7965,17 +8654,16 @@ spec: be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are - expanded using the previously defined environment - variables in the container and any service environment - variables. If a variable cannot be resolved, the - reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows - for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" - will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults - to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's @@ -7988,8 +8676,13 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the ConfigMap @@ -7998,12 +8691,11 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports - metadata.name, metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath @@ -8016,12 +8708,11 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, limits.ephemeral-storage, requests.cpu, - requests.memory and requests.ephemeral-storage) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for @@ -8042,6 +8733,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -8051,8 +8743,13 @@ spec: from. Must be a valid secret key. 
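Putting the env sources above together, a sidecar might mix a literal value, a Secret-backed variable, and the downward API. The Secret name and key here are hypothetical:

    env:
      - name: LOG_LEVEL
        value: info
      - name: API_TOKEN
        valueFrom:
          secretKeyRef:
            name: sidecar-secrets  # hypothetical Secret in the pod's namespace
            key: token
            optional: true         # the container still starts if the Secret is missing
      - name: POD_NAMESPACE
        valueFrom:
          fieldRef:
            fieldPath: metadata.namespace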
type: string name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret @@ -8061,19 +8758,22 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must - be a C_IDENTIFIER. All invalid keys will be reported - as an event when the container is starting. When a key - exists in multiple sources, the value associated with - the last source will take precedence. Values defined - by an Env with a duplicate key will take precedence. + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. Cannot be updated. items: description: EnvFromSource represents the source of @@ -8083,14 +8783,20 @@ spec: description: The ConfigMap to select from properties: name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the ConfigMap must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -8099,65 +8805,72 @@ spec: description: The Secret to select from properties: name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: - description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config - management to default or override container images in - workload controllers like Deployments and StatefulSets.' + description: |- + Container image name. 
+ More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. type: string imagePullPolicy: - description: 'Image pull policy. One of Always, Never, - IfNotPresent. Defaults to Always if :latest tag is specified, - or IfNotPresent otherwise. Cannot be updated. More info: - https://kubernetes.io/docs/concepts/containers/images#updating-images' + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images type: string lifecycle: - description: Actions that the management system should - take in response to container lifecycle events. Cannot - be updated. + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. properties: postStart: - description: 'PostStart is called immediately after - a container is created. If the handler fails, the - container is terminated and restarted according - to its restart policy. Other management of the container - blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to - execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside a - shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, you - need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -8168,7 +8881,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
type: string value: description: The header field value @@ -8178,6 +8893,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -8185,24 +8901,36 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to - access on the container. Number must be - in the range 1 to 65535. Name must be an - IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: - description: Deprecated. TCPSocket is NOT supported - as a LifecycleHandler and kept for the backward - compatibility. There are no validation of this - field and lifecycle hooks will fail in runtime - when tcp handler is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to connect @@ -8212,55 +8940,49 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to - access on the container. Number must be - in the range 1 to 65535. Name must be an - IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object type: object preStop: - description: 'PreStop is called immediately before - a container is terminated due to an API request - or management event such as liveness/startup probe - failure, preemption, resource contention, etc. The - handler is not called if the container crashes or - exits. The Pod''s termination grace period countdown - begins before the PreStop hook is executed. Regardless - of the outcome of the handler, the container will - eventually terminate within the Pod''s termination - grace period (unless delayed by finalizers). Other - management of the container blocks until the hook - completes or until the termination grace period - is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. 
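The new sleep handler is a declarative alternative to the usual preStop shell-sleep idiom. A sketch, assuming a cluster where the pod lifecycle sleep action is available; the postStart command is illustrative:

    lifecycle:
      postStart:
        exec:
          command: ["/bin/sh", "-c", "echo started > /tmp/ready"]
      preStop:
        sleep:
          seconds: 5  # give endpoint updates time to propagate before SIGTERM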
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to - execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside a - shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, you - need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -8271,7 +8993,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -8281,6 +9005,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -8288,24 +9013,36 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to - access on the container. Number must be - in the range 1 to 65535. Name must be an - IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: - description: Deprecated. TCPSocket is NOT supported - as a LifecycleHandler and kept for the backward - compatibility. There are no validation of this - field and lifecycle hooks will fail in runtime - when tcp handler is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to connect @@ -8315,10 +9052,10 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to - access on the container. Number must be - in the range 1 to 65535. 
Name must be an - IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port @@ -8326,37 +9063,36 @@ spec: type: object type: object livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory - for the command is root ('/') in the container's - filesystem. The command is simply exec'd, it - is not run inside a shell, so traditional shell - instructions ('|', etc) won't work. To use a - shell, you need to explicitly call out to that - shell. Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: description: GRPC specifies an action involving a - GRPC port. This is a beta field and requires enabling - GRPCContainerProbe feature gate. + GRPC port. properties: port: description: Port number of the gRPC service. @@ -8364,11 +9100,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the service - to place in the gRPC HealthCheckRequest (see - https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -8378,9 +9115,9 @@ spec: perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in the request. @@ -8390,7 +9127,9 @@ spec: to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
type: string value: description: The header field value @@ -8400,6 +9139,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -8407,33 +9147,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the - host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. - Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -8448,60 +9190,59 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides the - value provided by the pod spec. Value must be non-negative - integer. The value zero indicates stop immediately - via the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
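For a sidecar that exposes an HTTP health endpoint, these probe fields compose as below. The port and path are assumptions about the sidecar, not anything this CRD prescribes:

    livenessProbe:
      httpGet:
        path: /healthz  # hypothetical endpoint
        port: 8080      # hypothetical sidecar port
      initialDelaySeconds: 5
      periodSeconds: 10
      timeoutSeconds: 1
      failureThreshold: 3  # restart after roughly 30s of consecutive failures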
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the probe - times out. Defaults to 1 second. Minimum value is - 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object name: - description: Name of the container specified as a DNS_LABEL. + description: |- + Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. type: string ports: - description: List of ports to expose from the container. - Exposing a port here gives the system additional information - about the network connections a container uses, but - is primarily informational. Not specifying a port here - DOES NOT prevent that port from being exposed. Any port - which is listening on the default "0.0.0.0" address - inside a container will be accessible from the network. + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. items: description: ContainerPort represents a network port in a single container. properties: containerPort: - description: Number of port to expose on the pod's - IP address. This must be a valid port number, - 0 < x < 65536. + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. format: int32 type: integer hostIP: @@ -8509,23 +9250,24 @@ spec: to. type: string hostPort: - description: Number of port to expose on the host. - If specified, this must be a valid port number, - 0 < x < 65536. If HostNetwork is specified, this - must match ContainerPort. Most containers do not - need this. + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. format: int32 type: integer name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in - a pod must have a unique name. Name for the port - that can be referred to by services. + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. type: string protocol: default: TCP - description: Protocol for port. Must be UDP, TCP, - or SCTP. Defaults to "TCP". 
+ description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string required: - containerPort @@ -8536,37 +9278,36 @@ spec: - protocol x-kubernetes-list-type: map readinessProbe: - description: 'Periodic probe of container service readiness. - Container will be removed from service endpoints if - the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory - for the command is root ('/') in the container's - filesystem. The command is simply exec'd, it - is not run inside a shell, so traditional shell - instructions ('|', etc) won't work. To use a - shell, you need to explicitly call out to that - shell. Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: description: GRPC specifies an action involving a - GRPC port. This is a beta field and requires enabling - GRPCContainerProbe feature gate. + GRPC port. properties: port: description: Port number of the gRPC service. @@ -8574,11 +9315,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the service - to place in the gRPC HealthCheckRequest (see - https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -8588,9 +9330,9 @@ spec: perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in the request. @@ -8600,7 +9342,9 @@ spec: to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
type: string value: description: The header field value @@ -8610,6 +9354,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -8617,33 +9362,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the - host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. - Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -8658,42 +9405,90 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides the - value provided by the pod spec. Value must be non-negative - integer. The value zero indicates stop immediately - via the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the probe - times out. Defaults to 1 second. Minimum value is - 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: - description: 'Compute Resources required by this container. - Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -8701,8 +9496,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -8711,33 +9507,76 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string securityContext: - description: 'SecurityContext defines the security options - the container should be run with. If set, the fields - of SecurityContext override the equivalent fields of - PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether - a process can gain more privileges than its parent - process. This bool directly controls if the no_new_privs - flag will be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as - Privileged 2) has CAP_SYS_ADMIN Note that this field - cannot be set when spec.os.name is windows.' + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. 
+ AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: - description: The capabilities to add/drop when running - containers. Defaults to the default set of capabilities - granted by the container runtime. Note that this - field cannot be set when spec.os.name is windows. + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. properties: add: description: Added capabilities @@ -8746,6 +9585,7 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -8753,65 +9593,63 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: - description: Run container in privileged mode. Processes - in privileged containers are essentially equivalent - to root on the host. Defaults to false. Note that - this field cannot be set when spec.os.name is windows. + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. type: boolean procMount: - description: procMount denotes the type of proc mount - to use for the containers. The default is DefaultProcMount - which uses the container runtime defaults for readonly - paths and masked paths. This requires the ProcMountType - feature flag to be enabled. Note that this field - cannot be set when spec.os.name is windows. + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string readOnlyRootFilesystem: - description: Whether this container has a read-only - root filesystem. Default is false. Note that this - field cannot be set when spec.os.name is windows. + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. type: boolean runAsGroup: - description: The GID to run the entrypoint of the - container process. Uses runtime default if unset. - May also be set in PodSecurityContext. 
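The new appArmorProfile field above mirrors the shape of seccompProfile; a sketch using the RuntimeDefault option:

    securityContext:
      appArmorProfile:
        type: RuntimeDefault   # Localhost additionally requires localhostProfile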
If set in - both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is windows. + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: - description: Indicates that the container must run - as a non-root user. If true, the Kubelet will validate - the image at runtime to ensure that it does not - run as UID 0 (root) and fail to start the container - if it does. If unset or false, no such validation - will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: boolean runAsUser: - description: The UID to run the entrypoint of the - container process. Defaults to user specified in - image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence. Note that this field cannot be - set when spec.os.name is windows. + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: - description: The SELinux context to be applied to - the container. If unspecified, the container runtime - will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is windows. + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: level: description: Level is SELinux level label that @@ -8831,113 +9669,98 @@ spec: type: string type: object seccompProfile: - description: The seccomp options to use by this container. - If seccomp options are provided at both the pod - & container level, the container options override - the pod options. Note that this field cannot be - set when spec.os.name is windows. + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: localhostProfile: - description: localhostProfile indicates a profile - defined in a file on the node should be used. - The profile must be preconfigured on the node - to work. Must be a descending path, relative - to the kubelet's configured seccomp profile - location. Must only be set if type is "Localhost". + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string type: - description: 'type indicates which kind of seccomp - profile will be applied. Valid options are: - Localhost - a profile defined in a file on the - node should be used. RuntimeDefault - the container - runtime default profile should be used. Unconfined - - no profile should be applied.' + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string required: - type type: object windowsOptions: - description: The Windows specific settings applied - to all containers. If unspecified, the options from - the PodSecurityContext will be used. If set in both - SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence. Note - that this field cannot be set when spec.os.name - is linux. + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA - admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential - spec named by the GMSACredentialSpecName field. + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. type: string gmsaCredentialSpecName: description: GMSACredentialSpecName is the name of the GMSA credential spec to use. type: string hostProcess: - description: HostProcess determines if a container - should be run as a 'Host Process' container. - This field is alpha-level and will only be honored - by components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the - feature flag will result in errors when validating - the Pod. All of a Pod's containers must have - the same effective HostProcess value (it is - not allowed to have a mix of HostProcess containers - and non-HostProcess containers). In addition, - if HostProcess is true then HostNetwork must - also be set to true. + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. 
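A sketch of the Localhost variant described above; the profile file name is a hypothetical placeholder, resolved relative to the kubelet's configured seccomp profile directory:

    seccompProfile:
      type: Localhost
      localhostProfile: profiles/postgres.json   # must be set if and only if type is "Localhost"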
+ All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: - description: The UserName in Windows to run the - entrypoint of the container process. Defaults - to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set - in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: string type: object type: object startupProbe: - description: 'StartupProbe indicates that the Pod has - successfully initialized. If specified, no other probes - are executed until this completes successfully. If this - probe fails, the Pod will be restarted, just as if the - livenessProbe failed. This can be used to provide different - probe parameters at the beginning of a Pod''s lifecycle, - when it might take a long time to load data or warm - a cache, than during steady-state operation. This cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory - for the command is root ('/') in the container's - filesystem. The command is simply exec'd, it - is not run inside a shell, so traditional shell - instructions ('|', etc) won't work. To use a - shell, you need to explicitly call out to that - shell. Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: description: GRPC specifies an action involving a - GRPC port. 
This is a beta field and requires enabling - GRPCContainerProbe feature gate. + GRPC port. properties: port: description: Port number of the gRPC service. @@ -8945,11 +9768,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the service - to place in the gRPC HealthCheckRequest (see - https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -8959,9 +9783,9 @@ spec: perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in the request. @@ -8971,7 +9795,9 @@ spec: to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -8981,6 +9807,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -8988,33 +9815,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the - host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. - Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -9029,81 +9858,76 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. 
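A sketch of a gRPC startup probe using the fields above; the port number is an assumption:

    startupProbe:
      grpc:
        port: 8008
        # service omitted: defaults to "" and gRPC's default behavior applies
      failureThreshold: 30   # schema default is 3
      periodSeconds: 10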
Name must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides the - value provided by the pod spec. Value must be non-negative - integer. The value zero indicates stop immediately - via the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the probe - times out. Defaults to 1 second. Minimum value is - 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object stdin: - description: Whether this container should allocate a - buffer for stdin in the container runtime. If this is - not set, reads from stdin in the container will always - result in EOF. Default is false. + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. type: boolean stdinOnce: - description: Whether the container runtime should close - the stdin channel after it has been opened by a single - attach. When stdin is true the stdin stream will remain - open across multiple attach sessions. If stdinOnce is - set to true, stdin is opened on container start, is - empty until the first client attaches to stdin, and - then remains open and accepts data until the client - disconnects, at which time stdin is closed and remains - closed until the container is restarted. If this flag - is false, a container processes that reads from stdin - will never receive an EOF. 
Default is false + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false type: boolean terminationMessagePath: - description: 'Optional: Path at which the file to which - the container''s termination message will be written - is mounted into the container''s filesystem. Message - written is intended to be brief final status, such as - an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length - across all containers will be limited to 12kb. Defaults - to /dev/termination-log. Cannot be updated.' + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. type: string terminationMessagePolicy: - description: Indicate how the termination message should - be populated. File will use the contents of terminationMessagePath - to populate the container status message on both success - and failure. FallbackToLogsOnError will use the last - chunk of container log output if the termination message - file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, - whichever is smaller. Defaults to File. Cannot be updated. + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. type: string tty: - description: Whether this container should allocate a - TTY for itself, also requires 'stdin' to be true. Default - is false. + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. type: boolean volumeDevices: description: volumeDevices is the list of block devices @@ -9125,86 +9949,118 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: - description: Pod volumes to mount into the container's - filesystem. Cannot be updated. + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. items: description: VolumeMount describes a mounting of a Volume within a container. properties: mountPath: - description: Path within the container at which - the volume should be mounted. Must not contain - ':'. + description: |- + Path within the container at which the volume should be mounted. 
Must + not contain ':'. type: string mountPropagation: - description: mountPropagation determines how mounts - are propagated from the host to container and - the other way around. When not set, MountPropagationNone - is used. This field is beta in 1.10. + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name of a Volume. type: string readOnly: - description: Mounted read-only if true, read-write - otherwise (false or unspecified). Defaults to - false. + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: - description: Path within the volume from which the - container's volume should be mounted. Defaults - to "" (volume's root). + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). type: string subPathExpr: - description: Expanded path within the volume from - which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable - references $(VAR_NAME) are expanded using the - container's environment. Defaults to "" (volume's - root). SubPathExpr and SubPath are mutually exclusive. + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. type: string required: - mountPath - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which - might be configured in the container image. Cannot be - updated. + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. type: string required: - name type: object type: array dataVolumeClaimSpec: - description: 'Defines a PersistentVolumeClaim for PostgreSQL - data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes' + description: |- + Defines a PersistentVolumeClaim for PostgreSQL data. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes properties: accessModes: - description: 'accessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string - minItems: 1 type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to specify either: + description: |- + dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the provisioner - or an external controller can support the specified data - source, it will create a new volume based on the contents - of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will always have the - same contents as the DataSourceRef field.' + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -9216,34 +10072,38 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object from which - to populate the volume with data, if a non-empty volume - is desired. This may be any local object from a non-empty - API group (non core object) or a PersistentVolumeClaim - object. When this field is specified, volume binding will - only succeed if the type of the specified object matches - some installed volume populator or dynamic provisioner. - This field will replace the functionality of the DataSource - field and as such if both fields are non-empty, they must - have the same value. For backwards compatibility, both - fields (DataSource and DataSourceRef) will be set to the - same value automatically if one of them is empty and the - other is non-empty. There are two important differences - between DataSource and DataSourceRef: * While DataSource - only allows two specific types of objects, DataSourceRef - allows any non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed values - (dropping them), DataSourceRef preserves all values, and - generates an error if a disallowed value is specified. - (Beta) Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' 
+ description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -9251,17 +10111,23 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name type: object resources: - description: 'resources represents the minimum resources - the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify resource - requirements that are lower than previous value but must - still be higher than capacity recorded in the status field - of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -9270,8 +10136,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -9280,16 +10147,12 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - required: - - storage + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object - required: - - requests type: object selector: description: selector is a label query over volumes to consider @@ -9299,8 +10162,8 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: @@ -9308,52 +10171,72 @@ spec: applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. 
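A sketch of a cross-namespace dataSourceRef per the schema above; the snapshot name and namespace are assumptions, and the feature gates noted in the description apply:

    dataVolumeClaimSpec:
      accessModes: [ReadWriteOnce]
      resources:
        requests:
          storage: 10Gi
      dataSourceRef:
        apiGroup: snapshot.storage.k8s.io
        kind: VolumeSnapshot
        name: hippo-data-snapshot   # hypothetical snapshot
        namespace: backups          # needs a ReferenceGrant in "backups" plus the CrossNamespaceVolumeDataSource gate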
+ description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of the StorageClass - required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not - included in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string - required: - - accessModes - - resources type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) metadata: description: Metadata contains metadata for custom resources properties: @@ -9370,22 +10253,24 @@ spec: anyOf: - type: integer - type: string - description: Minimum number of pods that should be available - at a time. Defaults to one when the replicas field is greater - than one. + description: |- + Minimum number of pods that should be available at a time. + Defaults to one when the replicas field is greater than one. x-kubernetes-int-or-string: true name: default: "" - description: Name that associates this set of PostgreSQL pods. - This field is optional when only one instance set is defined. - Each instance set in a cluster must have a unique name. The - combined length of this and the cluster name must be 46 characters - or less. + description: |- + Name that associates this set of PostgreSQL pods. This field is optional + when only one instance set is defined. Each instance set in a cluster + must have a unique name. 
The combined length of this and the cluster name + must be 46 characters or less. pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$ type: string priorityClassName: - description: 'Priority class name for the PostgreSQL pod. Changing - this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the PostgreSQL pod. Changing this value causes + PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string replicas: default: 1 @@ -9396,6 +10281,31 @@ spec: resources: description: Compute resources of a PostgreSQL container. properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -9403,8 +10313,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -9413,11 +10324,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object sidecars: @@ -9430,6 +10341,32 @@ spec: resources: description: Resource requirements for a sidecar container properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. 
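The two CEL rules above replace the former required accessModes and resources entries, so the smallest claim spec they accept looks like:

    dataVolumeClaimSpec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi   # omitting this now fails with "missing storage request" instead of a schema error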
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -9437,8 +10374,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -9447,48 +10385,50 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object type: object type: object tablespaceVolumes: - description: The list of tablespaces volumes to mount for this - postgrescluster This field requires enabling TablespaceVolumes - feature gate + description: |- + The list of tablespaces volumes to mount for this postgrescluster + This field requires enabling TablespaceVolumes feature gate items: properties: dataVolumeClaimSpec: - description: 'Defines a PersistentVolumeClaim for a tablespace. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes' + description: |- + Defines a PersistentVolumeClaim for a tablespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes properties: accessModes: - description: 'accessModes contains the desired access - modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to specify - either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the - provisioner or an external controller can support - the specified data source, it will create a new - volume based on the contents of the specified data - source. If the AnyVolumeDataSource feature gate - is enabled, this field will always have the same - contents as the DataSourceRef field.' 
+ description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API group. - For any other third-party types, APIGroup is - required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being @@ -9502,37 +10442,38 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object from - which to populate the volume with data, if a non-empty - volume is desired. This may be any local object - from a non-empty API group (non core object) or - a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed if - the type of the specified object matches some installed - volume populator or dynamic provisioner. This field - will replace the functionality of the DataSource - field and as such if both fields are non-empty, - they must have the same value. For backwards compatibility, - both fields (DataSource and DataSourceRef) will - be set to the same value automatically if one of - them is empty and the other is non-empty. There - are two important differences between DataSource - and DataSourceRef: * While DataSource only allows - two specific types of objects, DataSourceRef allows - any non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed values - (dropping them), DataSourceRef preserves all values, - and generates an error if a disallowed value is - specified. (Beta) Using this field requires the - AnyVolumeDataSource feature gate to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. 
+ There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API group. - For any other third-party types, APIGroup is - required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being @@ -9542,17 +10483,23 @@ spec: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name type: object resources: - description: 'resources represents the minimum resources - the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify - resource requirements that are lower than previous - value but must still be higher than capacity recorded - in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -9561,8 +10508,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -9571,12 +10519,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. 
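A sketch of one entry in the tablespaceVolumes list (TablespaceVolumes feature gate required); the tablespace name is an assumption and becomes the directory name:

    tablespaceVolumes:
      - name: indexes            # must match ^[a-z][a-z0-9]*$
        dataVolumeClaimSpec:
          accessModes: [ReadWriteOnce]
          resources:
            requests:
              storage: 5Gi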
If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object selector: @@ -9588,63 +10535,86 @@ spec: selector requirements. The requirements are ANDed. items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. This array - is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of the - StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of volume - is required by the claim. Value of Filesystem is - implied when not included in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) + > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) name: - description: The name for the tablespace, used as the - path name for the volume. Must be unique in the instance - set since they become the directory names. + description: |- + The name for the tablespace, used as the path name for the volume. + Must be unique in the instance set since they become the directory names. minLength: 1 pattern: ^[a-z][a-z0-9]*$ type: string @@ -9657,67 +10627,67 @@ spec: - name x-kubernetes-list-type: map tolerations: - description: 'Tolerations of a PostgreSQL pod. Changing this - value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. 
Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array topologySpreadConstraints: - description: 'Topology spread constraints of a PostgreSQL pod. - Changing this value causes PostgreSQL to restart. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/' + description: |- + Topology spread constraints of a PostgreSQL pod. Changing this value causes + PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ items: description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. properties: labelSelector: - description: LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine - the number of pods in their corresponding topology domain. + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: @@ -9725,115 +10695,150 @@ spec: applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. + description: |- + values is an array of string values. 
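As a quick illustration of the toleration fields above, a sketch of an instance set that tolerates a dedicated node taint; the taint values are illustrative, and per the description changing them restarts PostgreSQL:

# PostgresCluster spec fragment
spec:
  instances:
    - name: instance1
      tolerations:
        # Tolerate nodes tainted dedicated=postgres:NoSchedule
        - key: dedicated
          operator: Equal
          value: postgres
          effect: NoSchedule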
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which pods - may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between the number - of matching pods in the target topology and the global - minimum. The global minimum is the minimum number of - matching pods in an eligible domain or zero if the number - of eligible domains is less than MinDomains. For example, - in a 3-zone cluster, MaxSkew is set to 1, and pods with - the same labelSelector spread as 2/2/1: In this case, - the global minimum is 1. | zone1 | zone2 | zone3 | | P - P | P P | P | - if MaxSkew is 1, incoming pod - can only be scheduled to zone3 to become 2/2/2; scheduling - it onto zone1(zone2) would make the ActualSkew(3-1) - on zone1(zone2) violate MaxSkew(1). - if MaxSkew is - 2, incoming pod can be scheduled onto any zone. When - `whenUnsatisfiable=ScheduleAnyway`, it is used to give - higher precedence to topologies that satisfy it. It''s - a required field. Default value is 1 and 0 is not allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number of - eligible domains. When the number of eligible domains - with matching topology keys is less than minDomains, - Pod Topology Spread treats \"global minimum\" as 0, - and then the calculation of Skew is performed. And when - the number of eligible domains with matching topology - keys equals or greater than minDomains, this value has - no effect on scheduling. As a result, when the number - of eligible domains is less than minDomains, scheduler - won't schedule more than maxSkew Pods to those domains. - If value is nil, the constraint behaves as if MinDomains - is equal to 1. Valid values are integers greater than - 0. When value is not nil, WhenUnsatisfiable must be - DoNotSchedule. \n For example, in a 3-zone cluster, - MaxSkew is set to 2, MinDomains is set to 5 and pods - with the same labelSelector spread as 2/2/2: | zone1 - | zone2 | zone3 | | P P | P P | P P | The number - of domains is less than 5(MinDomains), so \"global minimum\" - is treated as 0. In this situation, new pod with the - same labelSelector cannot be scheduled, because computed - skew will be 3(3 - 0) if new Pod is scheduled to any - of the three zones, it will violate MaxSkew. \n This - is an alpha field and requires enabling MinDomainsInPodTopologySpread - feature gate." + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. format: int32 type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. 
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string topologyKey: - description: TopologyKey is the key of node labels. Nodes - that have a label with this key and identical values - are considered to be in the same topology. We consider - each as a "bucket", and try to put balanced - number of pods into each bucket. We define a domain - as a particular instance of a topology. Also, we define - an eligible domain as a domain whose nodes match the - node selector. e.g. If TopologyKey is "kubernetes.io/hostname", - each Node is a domain of that topology. And, if TopologyKey - is "topology.kubernetes.io/zone", each zone is a domain - of that topology. It's a required field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal - with a pod if it doesn''t satisfy the spread constraint. - - DoNotSchedule (default) tells the scheduler not to - schedule it. - ScheduleAnyway tells the scheduler to - schedule the pod in any location, but giving higher - precedence to topologies that would help reduce the - skew. A constraint is considered "Unsatisfiable" for - an incoming pod if and only if every possible node assignment - for that pod would violate "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set to - 1, and pods with the same labelSelector spread as 3/1/1: - | zone1 | zone2 | zone3 | | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming - pod can only be scheduled to zone2(zone3) to become - 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - MaxSkew(1). In other words, the cluster can still be - imbalanced, but scheduler won''t make it *more* imbalanced. - It''s a required field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. 
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. type: string required: - maxSkew @@ -9842,31 +10847,34 @@ spec: type: object type: array walVolumeClaimSpec: - description: 'Defines a separate PersistentVolumeClaim for PostgreSQL''s - write-ahead log. More info: https://www.postgresql.org/docs/current/wal.html' + description: |- + Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. + More info: https://www.postgresql.org/docs/current/wal.html properties: accessModes: - description: 'accessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string - minItems: 1 type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to specify either: + description: |- + dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the provisioner - or an external controller can support the specified data - source, it will create a new volume based on the contents - of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will always have the - same contents as the DataSourceRef field.' + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -9878,34 +10886,38 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object from which - to populate the volume with data, if a non-empty volume - is desired. This may be any local object from a non-empty - API group (non core object) or a PersistentVolumeClaim - object. When this field is specified, volume binding will - only succeed if the type of the specified object matches - some installed volume populator or dynamic provisioner. 
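The two instance-set fields in play here can be sketched together: spread replicas across zones and give the write-ahead log its own claim, per the walVolumeClaimSpec description just above. Sizes and the zone key are illustrative; the label shown is the cluster label the operator applies to its pods:

# PostgresCluster spec fragment
spec:
  instances:
    - name: instance1
      replicas: 3
      dataVolumeClaimSpec:
        accessModes: [ReadWriteOnce]
        resources: { requests: { storage: 1Gi } }
      # Separate PVC for the write-ahead log
      walVolumeClaimSpec:
        accessModes: [ReadWriteOnce]
        resources: { requests: { storage: 1Gi } }
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: ScheduleAnyway
          labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/cluster: hippo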
- This field will replace the functionality of the DataSource - field and as such if both fields are non-empty, they must - have the same value. For backwards compatibility, both - fields (DataSource and DataSourceRef) will be set to the - same value automatically if one of them is empty and the - other is non-empty. There are two important differences - between DataSource and DataSourceRef: * While DataSource - only allows two specific types of objects, DataSourceRef - allows any non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed values - (dropping them), DataSourceRef preserves all values, and - generates an error if a disallowed value is specified. - (Beta) Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -9913,17 +10925,23 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ type: string required: - kind - name type: object resources: - description: 'resources represents the minimum resources - the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify resource - requirements that are lower than previous value but must - still be higher than capacity recorded in the status field - of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -9932,8 +10950,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -9942,16 +10961,12 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - required: - - storage + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object - required: - - requests type: object selector: description: selector is a label query over volumes to consider @@ -9961,8 +10976,8 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: @@ -9970,52 +10985,72 @@ spec: applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. 
- This array is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of the StorageClass - required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not - included in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the PersistentVolume backing this claim. 
type: string - required: - - accessModes - - resources type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) required: - dataVolumeClaimSpec type: object @@ -10047,77 +11082,167 @@ spec: exporter: properties: configuration: - description: 'Projected volumes containing custom PostgreSQL - Exporter configuration. Currently supports the customization - of PostgreSQL Exporter queries. If a "queries.yml" file - is detected in any volume projected using this field, - it will be loaded using the "extend.query-path" flag: + description: |- + Projected volumes containing custom PostgreSQL Exporter configuration. Currently supports + the customization of PostgreSQL Exporter queries. If a "queries.yml" file is detected in + any volume projected using this field, it will be loaded using the "extend.query-path" flag: https://github.com/prometheus-community/postgres_exporter#flags - Changing the values of field causes PostgreSQL and the - exporter to restart.' + Changing the values of field causes PostgreSQL and the exporter to restart. items: description: Projection that may be projected along with other supported volume types properties: - configMap: - description: configMap information about the configMap - data to project + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. properties: - items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the ConfigMap, the - volume setup will error unless it is marked - optional. Paths must be relative and may not - contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. 
- type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - optional: - description: optional specify whether the ConfigMap + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. 
+ properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -10133,7 +11258,7 @@ spec: fieldRef: description: 'Required: Selects a field of the pod: only annotations, labels, - name and namespace are supported.' + name, namespace and uid are supported.' properties: apiVersion: description: Version of the schema @@ -10147,19 +11272,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used - to set permissions on this file, must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -10171,11 +11292,9 @@ spec: must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and - requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are - currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
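Connecting the projection schema back to its purpose: per the exporter description above, a queries.yml found in any projected volume is loaded through the exporter's extend.query-path flag. A sketch with a hypothetical ConfigMap name:

# PostgresCluster spec fragment
spec:
  monitoring:
    pgmonitor:
      exporter:
        configuration:
          # ConfigMap expected to carry a queries.yml key
          - configMap:
              name: exporter-queries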
properties: containerName: description: 'Container name: required @@ -10197,27 +11316,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the Secret, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -10226,69 +11344,68 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience - of the token. A recipient of a token must - identify itself with an identifier specified - in the audience of the token, and otherwise - should reject the token. The audience defaults - to the identifier of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account - token. As the token approaches expiration, - the kubelet volume plugin will proactively - rotate the service account token. The kubelet - will start trying to rotate the token if the - token is older than 80 percent of its time - to live or if the token is older than 24 hours.Defaults - to 1 hour and must be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the - mount point of the file to project the token - into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -10296,20 +11413,19 @@ spec: type: object type: array customTLSSecret: - description: Projected secret containing custom TLS certificates - to encrypt output from the exporter web server + description: |- + Projected secret containing custom TLS certificates to encrypt output from the exporter + web server properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If - a key is specified which is not present in the Secret, - the volume setup will error unless it is marked - optional. Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. 
@@ -10318,22 +11434,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires decimal - values for mode bits. If not specified, the - volume defaultMode will be used. This might - be in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -10341,23 +11455,58 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic image: - description: The image name to use for crunchy-postgres-exporter - containers. The image may also be set using the RELATED_IMAGE_PGEXPORTER - environment variable. + description: |- + The image name to use for crunchy-postgres-exporter containers. The image may + also be set using the RELATED_IMAGE_PGEXPORTER environment variable. type: string resources: - description: 'Changing this value causes PostgreSQL and - the exporter to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers' + description: |- + Changing this value causes PostgreSQL and the exporter to restart. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
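The remaining exporter fields in this hunk, sketched with illustrative values; the Secret name is hypothetical, and per the descriptions changing either field restarts PostgreSQL and the exporter:

# PostgresCluster spec fragment
spec:
  monitoring:
    pgmonitor:
      exporter:
        customTLSSecret:
          # hypothetical Secret holding the exporter's TLS certificate and key
          name: exporter-tls
        resources:
          requests: { cpu: 100m, memory: 64Mi }
          limits: { memory: 128Mi }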
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -10365,8 +11514,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -10375,33 +11525,36 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object type: object type: object type: object openshift: - description: Whether or not the PostgreSQL cluster is being deployed - to an OpenShift environment. If the field is unset, the operator - will automatically detect the environment. + description: |- + Whether or not the PostgreSQL cluster is being deployed to an OpenShift + environment. If the field is unset, the operator will automatically + detect the environment. type: boolean patroni: properties: dynamicConfiguration: - description: 'Patroni dynamic configuration settings. Changes - to this value will be automatically reloaded without validation. - Changes to certain PostgreSQL parameters cause PostgreSQL to - restart. More info: https://patroni.readthedocs.io/en/latest/SETTINGS.html' + description: |- + Patroni dynamic configuration settings. Changes to this value will be + automatically reloaded without validation. Changes to certain PostgreSQL + parameters cause PostgreSQL to restart. + More info: https://patroni.readthedocs.io/en/latest/dynamic_configuration.html type: object x-kubernetes-preserve-unknown-fields: true leaderLeaseDurationSeconds: default: 30 - description: TTL of the cluster leader lock. "Think of it as the + description: |- + TTL of the cluster leader lock. "Think of it as the length of time before initiation of the automatic failover process." Changing this value causes PostgreSQL to restart. format: int32 @@ -10409,8 +11562,9 @@ spec: type: integer port: default: 8008 - description: The port on which Patroni should listen. Changing - this value causes PostgreSQL to restart. + description: |- + The port on which Patroni should listen. + Changing this value causes PostgreSQL to restart. 
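The Patroni settings in this hunk, collected into one fragment. The lease, sync period, and port values shown are the CRD defaults; the dynamicConfiguration body follows Patroni's own schema and, per the description, is reloaded without validation (max_connections is an illustrative parameter, not a default):

# PostgresCluster spec fragment
spec:
  patroni:
    leaderLeaseDurationSeconds: 30 # leader lock TTL
    syncPeriodSeconds: 10          # must be less than the TTL
    port: 8008
    dynamicConfiguration:
      postgresql:
        parameters:
          max_connections: 200     # changing certain parameters restarts PostgreSQL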
format: int32 minimum: 1024 type: integer @@ -10423,20 +11577,19 @@ spec: in a PostgresCluster type: boolean targetInstance: - description: The instance that should become primary during - a switchover. This field is optional when Type is "Switchover" - and required when Type is "Failover". When it is not specified, - a healthy replica is automatically selected. + description: |- + The instance that should become primary during a switchover. This field is + optional when Type is "Switchover" and required when Type is "Failover". + When it is not specified, a healthy replica is automatically selected. type: string type: default: Switchover - description: 'Type of switchover to perform. Valid options - are Switchover and Failover. "Switchover" changes the primary - instance of a healthy PostgresCluster. "Failover" forces - a particular instance to be primary, regardless of other + description: |- + Type of switchover to perform. Valid options are Switchover and Failover. + "Switchover" changes the primary instance of a healthy PostgresCluster. + "Failover" forces a particular instance to be primary, regardless of other factors. A TargetInstance must be specified to failover. - NOTE: The Failover type is reserved as the "last resort" - case.' + NOTE: The Failover type is reserved as the "last resort" case. enum: - Switchover - Failover @@ -10446,7 +11599,8 @@ spec: type: object syncPeriodSeconds: default: 10 - description: The interval for refreshing the leader lock and applying + description: |- + The interval for refreshing the leader lock and applying dynamicConfiguration. Must be less than leaderLeaseDurationSeconds. Changing this value causes PostgreSQL to restart. format: int32 @@ -10454,8 +11608,9 @@ spec: type: integer type: object paused: - description: Suspends the rollout and reconciliation of changes made - to the PostgresCluster spec. + description: |- + Suspends the rollout and reconciliation of changes made to the + PostgresCluster spec. type: boolean port: default: 5432 @@ -10464,15 +11619,15 @@ spec: minimum: 1024 type: integer postGISVersion: - description: The PostGIS extension version installed in the PostgreSQL - image. When image is not set, indicates a PostGIS enabled image - will be used. + description: |- + The PostGIS extension version installed in the PostgreSQL image. + When image is not set, indicates a PostGIS enabled image will be used. type: string postgresVersion: description: The major version of PostgreSQL installed in the PostgreSQL image - maximum: 16 - minimum: 10 + maximum: 17 + minimum: 11 type: integer proxy: description: The specification of a proxy that connects to PostgreSQL. @@ -10481,31 +11636,30 @@ spec: description: Defines a PgBouncer proxy and connection pooler. properties: affinity: - description: 'Scheduling constraints of a PgBouncer pod. Changing - this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of a PgBouncer pod. Changing this value causes + PgBouncer to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. 
The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 (i.e. - it's a no-op). A null preferred scheduling term - matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -10515,79 +11669,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
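Editor's note: the switchover fields earlier in this hunk combine as follows. A sketch with a hypothetical target instance name; as the descriptions state, targetInstance is optional for type Switchover but required for Failover, and Failover is reserved as the last-resort path.

    spec:
      patroni:
        switchover:
          enabled: true
          type: Switchover                      # default; Failover requires targetInstance
          targetInstance: hippo-instance1-abcd  # hypothetical; omit to auto-select a healthy replica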
+ description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -10599,105 +11746,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to an update), the system may or may not try - to eventually evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. 
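Editor's note: the node-affinity schema being reflowed here is verbose, but an instance of it is short. A sketch of a hard node-affinity rule for the PgBouncer pod, assuming a hypothetical disktype node label; terms in nodeSelectorTerms are ORed, while the requirements inside one term are ANDed.

    spec:
      proxy:
        pgBouncer:
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:          # terms are ORed
                  - matchExpressions:       # requirements within a term are ANDed
                      - key: disktype       # hypothetical node label
                        operator: In        # In, NotIn, Exists, DoesNotExist, Gt, Lt
                        values: [ssd]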
+ description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -10705,19 +11847,16 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. 
for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -10728,18 +11867,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -10747,60 +11886,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -10808,70 +11969,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. 
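Editor's note: matchLabelKeys and mismatchLabelKeys are newly added alpha fields gated by MatchLabelKeysInPodAffinity, as the descriptions note. A sketch of a soft pod-affinity term using a hypothetical app label; pod-template-hash is a common key choice, scoping co-location to pods from the same revision as the incoming pod.

    spec:
      proxy:
        pgBouncer:
          affinity:
            podAffinity:
              preferredDuringSchedulingIgnoredDuringExecution:
                - weight: 50                 # range 1-100
                  podAffinityTerm:
                    labelSelector:
                      matchLabels:
                        app: hippo           # hypothetical pod label
                    # Alpha: requires the MatchLabelKeysInPodAffinity feature gate.
                    matchLabelKeys:
                      - pod-template-hash
                    topologyKey: kubernetes.io/hostname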
+ description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -10879,161 +12031,179 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to a pod label update), the system may or may - not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. 
+ description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. 
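Editor's note: namespaceSelector widens a term beyond the incoming pod's namespace. Per the descriptions, the term applies to the union of the namespaces it selects and any listed in the namespaces field; a null selector with no namespaces means "this pod's namespace", while an empty selector ({}) matches all namespaces. A sketch with hypothetical labels:

    spec:
      proxy:
        pgBouncer:
          affinity:
            podAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                - labelSelector:
                    matchLabels:
                      app: hippo            # hypothetical pod label
                  namespaceSelector:        # null = this namespace only; {} = all namespaces
                    matchLabels:
                      team: database        # hypothetical namespace label
                  topologyKey: topology.kubernetes.io/zone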
If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules @@ -11041,19 +12211,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. 
for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to - the sum if the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -11064,18 +12231,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -11083,60 +12250,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -11144,70 +12333,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. 
If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -11215,206 +12395,317 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - anti-affinity requirements specified by this field - cease to be met at some point during pod execution - (e.g. 
due to a pod label update), the system may - or may not try to eventually evict the pod from - its node. When there are multiple elements, the - lists of nodes corresponding to each podAffinityTerm - are intersected, i.e. all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. 
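Editor's note: the anti-affinity half mirrors the affinity schema. A sketch of a hard spread rule that keeps PgBouncer pods on distinct nodes, assuming the pods carry the label shown (an assumption here, not taken from this diff); topologyKey must be non-empty.

    spec:
      proxy:
        pgBouncer:
          affinity:
            podAntiAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                - labelSelector:
                    matchLabels:
                      postgres-operator.crunchydata.com/role: pgbouncer  # assumed label
                  topologyKey: kubernetes.io/hostname                    # must not be empty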
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object config: - description: 'Configuration settings for the PgBouncer process. - Changes to any of these values will be automatically reloaded - without validation. Be careful, as you may put PgBouncer - into an unusable state. More info: https://www.pgbouncer.org/usage.html#reload' + description: |- + Configuration settings for the PgBouncer process. Changes to any of these + values will be automatically reloaded without validation. Be careful, as + you may put PgBouncer into an unusable state. 
+ More info: https://www.pgbouncer.org/usage.html#reload properties: databases: additionalProperties: type: string - description: 'PgBouncer database definitions. The key - is the database requested by a client while the value - is a libpq-styled connection string. The special key - "*" acts as a fallback. When this field is empty, PgBouncer - is configured with a single "*" entry that connects - to the primary PostgreSQL instance. More info: https://www.pgbouncer.org/config.html#section-databases' + description: |- + PgBouncer database definitions. The key is the database requested by a + client while the value is a libpq-styled connection string. The special + key "*" acts as a fallback. When this field is empty, PgBouncer is + configured with a single "*" entry that connects to the primary + PostgreSQL instance. + More info: https://www.pgbouncer.org/config.html#section-databases type: object files: - description: 'Files to mount under "/etc/pgbouncer". When - specified, settings in the "pgbouncer.ini" file are - loaded before all others. From there, other files may - be included by absolute path. Changing these references - causes PgBouncer to restart, but changes to the file - contents are automatically reloaded. More info: https://www.pgbouncer.org/config.html#include-directive' + description: |- + Files to mount under "/etc/pgbouncer". When specified, settings in the + "pgbouncer.ini" file are loaded before all others. From there, other + files may be included by absolute path. Changing these references causes + PgBouncer to restart, but changes to the file contents are automatically + reloaded. + More info: https://www.pgbouncer.org/config.html#include-directive items: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the ConfigMap, the - volume setup will error unless it is marked - optional. Paths must be relative and may not - contain the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -11423,41 +12714,43 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -11473,7 +12766,7 @@ spec: fieldRef: description: 'Required: Selects a field of the pod: only annotations, labels, - name and namespace are supported.' + name, namespace and uid are supported.' properties: apiVersion: description: Version of the schema @@ -11487,19 +12780,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used - to set permissions on this file, must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -11511,11 +12800,9 @@ spec: must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and - requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are - currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
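# Reviewer aside (illustrative, not part of the generated CRD): the projected-volume
# schema in this hunk is what a user writes under spec.proxy.pgBouncer.config.files.
# A minimal hypothetical sketch, assuming the PgBouncer container is named
# "pgbouncer"; the path and resource below are examples, not values from this diff.
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo                            # placeholder cluster name
spec:
  proxy:
    pgBouncer:
      config:
        files:
        - downwardAPI:
            items:
            - path: cpu-request          # file created under /etc/pgbouncer
              resourceFieldRef:
                containerName: pgbouncer # assumed container name
                resource: requests.cpu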
properties: containerName: description: 'Container name: required @@ -11537,27 +12824,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the Secret, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -11566,69 +12852,68 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience - of the token. A recipient of a token must - identify itself with an identifier specified - in the audience of the token, and otherwise - should reject the token. The audience defaults - to the identifier of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account - token. As the token approaches expiration, - the kubelet volume plugin will proactively - rotate the service account token. The kubelet - will start trying to rotate the token if the - token is older than 80 percent of its time - to live or if the token is older than 24 hours.Defaults - to 1 hour and must be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the - mount point of the file to project the token - into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -11638,56 +12923,58 @@ spec: global: additionalProperties: type: string - description: 'Settings that apply to the entire PgBouncer - process. More info: https://www.pgbouncer.org/config.html' + description: |- + Settings that apply to the entire PgBouncer process. + More info: https://www.pgbouncer.org/config.html type: object users: additionalProperties: type: string - description: 'Connection settings specific to particular - users. More info: https://www.pgbouncer.org/config.html#section-users' + description: |- + Connection settings specific to particular users. + More info: https://www.pgbouncer.org/config.html#section-users type: object type: object containers: - description: Custom sidecars for a PgBouncer pod. Changing - this value causes PgBouncer to restart. + description: |- + Custom sidecars for a PgBouncer pod. Changing this value causes + PgBouncer to restart. items: description: A single application container that you want to run within a pod. properties: args: - description: 'Arguments to the entrypoint. The container - image''s CMD is used if this is not provided. Variable - references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the - reference in the input string will be unchanged. Double - $$ are reduced to a single $, which allows for escaping - the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce - the string literal "$(VAR_NAME)". 
Escaped references - will never be expanded, regardless of whether the - variable exists or not. Cannot be updated. More info: - https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array + x-kubernetes-list-type: atomic command: - description: 'Entrypoint array. Not executed within - a shell. The container image''s ENTRYPOINT is used - if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If - a variable cannot be resolved, the reference in the - input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) - syntax: i.e. "$$(VAR_NAME)" will produce the string - literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array + x-kubernetes-list-type: atomic env: - description: List of environment variables to set in - the container. Cannot be updated. + description: |- + List of environment variables to set in the container. + Cannot be updated. items: description: EnvVar represents an environment variable present in a Container. @@ -11697,17 +12984,16 @@ spec: Must be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) - are expanded using the previously defined environment - variables in the container and any service environment - variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows - for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" - will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults - to "".' 
+ description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's @@ -11720,8 +13006,13 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the ConfigMap @@ -11730,12 +13021,11 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: - supports metadata.name, metadata.namespace, - `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, - status.hostIP, status.podIP, status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the @@ -11749,12 +13039,11 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, limits.ephemeral-storage, - requests.cpu, requests.memory and requests.ephemeral-storage) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required @@ -11775,6 +13064,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -11785,8 +13075,13 @@ spec: key. type: string name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret @@ -11795,20 +13090,23 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: - description: List of sources to populate environment - variables in the container. The keys defined within - a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is - starting. When a key exists in multiple sources, the - value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. items: description: EnvFromSource represents the source of a set of ConfigMaps @@ -11817,14 +13115,20 @@ spec: description: The ConfigMap to select from properties: name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the ConfigMap must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -11833,66 +13137,72 @@ spec: description: The Secret to select from properties: name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: - description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config - management to default or override container images - in workload controllers like Deployments and StatefulSets.' + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. type: string imagePullPolicy: - description: 'Image pull policy. One of Always, Never, - IfNotPresent. 
Defaults to Always if :latest tag is - specified, or IfNotPresent otherwise. Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images type: string lifecycle: - description: Actions that the management system should - take in response to container lifecycle events. Cannot - be updated. + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. properties: postStart: - description: 'PostStart is called immediately after - a container is created. If the handler fails, - the container is terminated and restarted according - to its restart policy. Other management of the - container blocks until the hook completes. More - info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -11903,7 +13213,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -11913,6 +13225,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -11921,24 +13234,36 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. 
+ description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: - description: Deprecated. TCPSocket is NOT supported - as a LifecycleHandler and kept for the backward - compatibility. There are no validation of - this field and lifecycle hooks will fail in - runtime when tcp handler is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to connect @@ -11948,55 +13273,49 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object type: object preStop: - description: 'PreStop is called immediately before - a container is terminated due to an API request - or management event such as liveness/startup probe - failure, preemption, resource contention, etc. - The handler is not called if the container crashes - or exits. The Pod''s termination grace period - countdown begins before the PreStop hook is executed. - Regardless of the outcome of the handler, the - container will eventually terminate within the - Pod''s termination grace period (unless delayed - by finalizers). Other management of the container - blocks until the hook completes or until the termination - grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. 
To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -12007,7 +13326,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -12017,6 +13338,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -12025,24 +13347,36 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: - description: Deprecated. TCPSocket is NOT supported - as a LifecycleHandler and kept for the backward - compatibility. There are no validation of - this field and lifecycle hooks will fail in - runtime when tcp handler is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to connect @@ -12052,10 +13386,10 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port @@ -12063,37 +13397,36 @@ spec: type: object type: object livenessProbe: - description: 'Periodic probe of container liveness. - Container will be restarted if the probe fails. Cannot - be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to - execute inside the container, the working - directory for the command is root ('/') in - the container's filesystem. The command is - simply exec'd, it is not run inside a shell, - so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is - treated as live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -12101,11 +13434,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the service - to place in the gRPC HealthCheckRequest (see - https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -12115,9 +13449,9 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in the request. @@ -12127,7 +13461,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -12137,6 +13473,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -12144,34 +13481,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. 
+ description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to - the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is - 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having - failed. Defaults to 1. Must be 1 for liveness - and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -12186,61 +13524,59 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value must - be non-negative integer. The value zero indicates - stop immediately via the kill signal (no opportunity - to shut down). This is a beta field and requires - enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. 
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the - probe times out. Defaults to 1 second. Minimum - value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object name: - description: Name of the container specified as a DNS_LABEL. + description: |- + Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. type: string ports: - description: List of ports to expose from the container. - Exposing a port here gives the system additional information - about the network connections a container uses, but - is primarily informational. Not specifying a port - here DOES NOT prevent that port from being exposed. - Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from - the network. Cannot be updated. + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. items: description: ContainerPort represents a network port in a single container. properties: containerPort: - description: Number of port to expose on the pod's - IP address. This must be a valid port number, - 0 < x < 65536. + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. format: int32 type: integer hostIP: @@ -12248,23 +13584,24 @@ spec: port to. type: string hostPort: - description: Number of port to expose on the host. - If specified, this must be a valid port number, - 0 < x < 65536. If HostNetwork is specified, - this must match ContainerPort. Most containers - do not need this. + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. format: int32 type: integer name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in - a pod must have a unique name. Name for the - port that can be referred to by services. + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. type: string protocol: default: TCP - description: Protocol for port. Must be UDP, TCP, - or SCTP. Defaults to "TCP". + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string required: - containerPort @@ -12275,37 +13612,36 @@ spec: - protocol x-kubernetes-list-type: map readinessProbe: - description: 'Periodic probe of container service readiness. - Container will be removed from service endpoints if - the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to - execute inside the container, the working - directory for the command is root ('/') in - the container's filesystem. The command is - simply exec'd, it is not run inside a shell, - so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is - treated as live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -12313,11 +13649,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the service - to place in the gRPC HealthCheckRequest (see - https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -12327,9 +13664,9 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in the request. @@ -12339,7 +13676,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -12349,6 +13688,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -12356,34 +13696,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. 
Name must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to - the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is - 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having - failed. Defaults to 1. Must be 1 for liveness - and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -12398,43 +13739,90 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value must - be non-negative integer. The value zero indicates - stop immediately via the kill signal (no opportunity - to shut down). This is a beta field and requires - enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). 
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the - probe times out. Defaults to 1 second. Minimum - value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: - description: 'Compute Resources required by this container. - Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -12442,8 +13830,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -12452,34 +13841,76 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string securityContext: - description: 'SecurityContext defines the security options - the container should be run with. If set, the fields - of SecurityContext override the equivalent fields - of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls - whether a process can gain more privileges than - its parent process. This bool directly controls - if the no_new_privs flag will be set on the container - process. AllowPrivilegeEscalation is true always - when the container is: 1) run as Privileged 2) - has CAP_SYS_ADMIN Note that this field cannot - be set when spec.os.name is windows.' + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". 
+ type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: - description: The capabilities to add/drop when running - containers. Defaults to the default set of capabilities - granted by the container runtime. Note that this - field cannot be set when spec.os.name is windows. + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. properties: add: description: Added capabilities @@ -12488,6 +13919,7 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -12495,68 +13927,63 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: - description: Run container in privileged mode. Processes - in privileged containers are essentially equivalent - to root on the host. Defaults to false. Note that - this field cannot be set when spec.os.name is - windows. + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. type: boolean procMount: - description: procMount denotes the type of proc - mount to use for the containers. The default is - DefaultProcMount which uses the container runtime - defaults for readonly paths and masked paths. - This requires the ProcMountType feature flag to - be enabled. Note that this field cannot be set - when spec.os.name is windows. + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string readOnlyRootFilesystem: - description: Whether this container has a read-only - root filesystem. Default is false. Note that this - field cannot be set when spec.os.name is windows. + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. type: boolean runAsGroup: - description: The GID to run the entrypoint of the - container process. Uses runtime default if unset. - May also be set in PodSecurityContext. If set - in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is windows. + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: - description: Indicates that the container must run - as a non-root user. If true, the Kubelet will - validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start - the container if it does. 
If unset or false, no - such validation will be performed. May also be - set in PodSecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in - SecurityContext takes precedence. + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: boolean runAsUser: - description: The UID to run the entrypoint of the - container process. Defaults to user specified - in image metadata if unspecified. May also be - set in PodSecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in - SecurityContext takes precedence. Note that this - field cannot be set when spec.os.name is windows. + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: - description: The SELinux context to be applied to - the container. If unspecified, the container runtime - will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is windows. + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: level: description: Level is SELinux level label that @@ -12576,114 +14003,98 @@ spec: type: string type: object seccompProfile: - description: The seccomp options to use by this - container. If seccomp options are provided at - both the pod & container level, the container - options override the pod options. Note that this - field cannot be set when spec.os.name is windows. + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: localhostProfile: - description: localhostProfile indicates a profile - defined in a file on the node should be used. - The profile must be preconfigured on the node - to work. Must be a descending path, relative - to the kubelet's configured seccomp profile - location. Must only be set if type is "Localhost". + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. 
type: string type: - description: 'type indicates which kind of seccomp - profile will be applied. Valid options are: - Localhost - a profile defined in a file on - the node should be used. RuntimeDefault - - the container runtime default profile should - be used. Unconfined - no profile should be - applied.' + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string required: - type type: object windowsOptions: - description: The Windows specific settings applied - to all containers. If unspecified, the options - from the PodSecurityContext will be used. If set - in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is linux. + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: - description: GMSACredentialSpec is where the - GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential - spec named by the GMSACredentialSpecName field. + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. type: string gmsaCredentialSpecName: description: GMSACredentialSpecName is the name of the GMSA credential spec to use. type: string hostProcess: - description: HostProcess determines if a container - should be run as a 'Host Process' container. - This field is alpha-level and will only be - honored by components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the - feature flag will result in errors when validating - the Pod. All of a Pod's containers must have - the same effective HostProcess value (it is - not allowed to have a mix of HostProcess containers - and non-HostProcess containers). In addition, - if HostProcess is true then HostNetwork must - also be set to true. + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: - description: The UserName in Windows to run - the entrypoint of the container process. Defaults - to the user specified in image metadata if - unspecified. May also be set in PodSecurityContext. - If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
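
Taken together, the securityContext fields above allow a fairly locked-down container. A sketch of a restrictive profile, assuming a Linux node (the windowsOptions block applies only when spec.os.name is windows):

    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop: ["ALL"]          # start from an empty capability set
      privileged: false
      readOnlyRootFilesystem: true
      runAsNonRoot: true
      seccompProfile:
        type: RuntimeDefault   # or Localhost together with localhostProfile
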
type: string type: object type: object startupProbe: - description: 'StartupProbe indicates that the Pod has - successfully initialized. If specified, no other probes - are executed until this completes successfully. If - this probe fails, the Pod will be restarted, just - as if the livenessProbe failed. This can be used to - provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time - to load data or warm a cache, than during steady-state - operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to - execute inside the container, the working - directory for the command is root ('/') in - the container's filesystem. The command is - simply exec'd, it is not run inside a shell, - so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is - treated as live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -12691,11 +14102,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the service - to place in the gRPC HealthCheckRequest (see - https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -12705,9 +14117,9 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. 
+ description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in the request. @@ -12717,7 +14129,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -12727,6 +14141,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -12734,34 +14149,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to - the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is - 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having - failed. Defaults to 1. Must be 1 for liveness - and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -12776,83 +14192,75 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value must - be non-negative integer. The value zero indicates - stop immediately via the kill signal (no opportunity - to shut down). 
This is a beta field and requires - enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the - probe times out. Defaults to 1 second. Minimum - value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object stdin: - description: Whether this container should allocate - a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will - always result in EOF. Default is false. + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. type: boolean stdinOnce: - description: Whether the container runtime should close - the stdin channel after it has been opened by a single - attach. When stdin is true the stdin stream will remain - open across multiple attach sessions. If stdinOnce - is set to true, stdin is opened on container start, - is empty until the first client attaches to stdin, - and then remains open and accepts data until the client - disconnects, at which time stdin is closed and remains - closed until the container is restarted. If this flag - is false, a container processes that reads from stdin - will never receive an EOF. Default is false + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false type: boolean terminationMessagePath: - description: 'Optional: Path at which the file to which - the container''s termination message will be written - is mounted into the container''s filesystem. Message - written is intended to be brief final status, such - as an assertion failure message. Will be truncated - by the node if greater than 4096 bytes. 
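
As the startupProbe description above notes, no other probes run until the startup probe succeeds, which suits slow-initializing workloads. A sketch under stated assumptions: a container serving a hypothetical /healthz endpoint on port 8080:

    startupProbe:
      httpGet:
        path: /healthz       # hypothetical endpoint on this container
        port: 8080
        scheme: HTTP
      failureThreshold: 30   # tolerate up to 30 * periodSeconds of startup time
      periodSeconds: 10
      timeoutSeconds: 1
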
The total - message length across all containers will be limited - to 12kb. Defaults to /dev/termination-log. Cannot - be updated.' + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. type: string terminationMessagePolicy: - description: Indicate how the termination message should - be populated. File will use the contents of terminationMessagePath - to populate the container status message on both success - and failure. FallbackToLogsOnError will use the last - chunk of container log output if the termination message - file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, - whichever is smaller. Defaults to File. Cannot be - updated. + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. type: string tty: - description: Whether this container should allocate - a TTY for itself, also requires 'stdin' to be true. + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. type: boolean volumeDevices: @@ -12876,78 +14284,106 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: - description: Pod volumes to mount into the container's - filesystem. Cannot be updated. + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. items: description: VolumeMount describes a mounting of a Volume within a container. properties: mountPath: - description: Path within the container at which - the volume should be mounted. Must not contain - ':'. + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. type: string mountPropagation: - description: mountPropagation determines how mounts - are propagated from the host to container and - the other way around. When not set, MountPropagationNone - is used. This field is beta in 1.10. + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name of a Volume. type: string readOnly: - description: Mounted read-only if true, read-write - otherwise (false or unspecified). Defaults to - false. + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. 
+ + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: - description: Path within the volume from which - the container's volume should be mounted. Defaults - to "" (volume's root). + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). type: string subPathExpr: - description: Expanded path within the volume from - which the container's volume should be mounted. - Behaves similarly to SubPath but environment - variable references $(VAR_NAME) are expanded - using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath - are mutually exclusive. + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. type: string required: - mountPath - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which - might be configured in the container image. Cannot - be updated. + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. type: string required: - name type: object type: array customTLSSecret: - description: 'A secret projection containing a certificate - and key with which to encrypt connections to PgBouncer. - The "tls.crt", "tls.key", and "ca.crt" paths must be PEM-encoded - certificates and keys. Changing this value causes PgBouncer - to restart. More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths' + description: |- + A secret projection containing a certificate and key with which to encrypt + connections to PgBouncer. The "tls.crt", "tls.key", and "ca.crt" paths must + be PEM-encoded certificates and keys. Changing this value causes PgBouncer + to restart. + More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths properties: items: - description: items if unspecified, each key-value pair - in the Data field of the referenced Secret will be projected - into the volume as a file whose name is the key and - content is the value. If specified, the listed keys - will be projected into the specified paths, and unlisted - keys will not be present. If a key is specified which - is not present in the Secret, the volume setup will - error unless it is marked optional. Paths must be relative - and may not contain the '..' 
path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. properties: @@ -12955,41 +14391,49 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used to - set permissions on this file. Must be an octal - value between 0000 and 0777 or a decimal value - between 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal values for - mode bits. If not specified, the volume defaultMode - will be used. This might be in conflict with other - options that affect the file mode, like fsGroup, - and the result can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the file - to map the key to. May not be an absolute path. - May not contain the path element '..'. May not - start with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic image: - description: 'Name of a container image that can run PgBouncer - 1.15 or newer. Changing this value causes PgBouncer to restart. - The image may also be set using the RELATED_IMAGE_PGBOUNCER - environment variable. More info: https://kubernetes.io/docs/concepts/containers/images' + description: |- + Name of a container image that can run PgBouncer 1.15 or newer. Changing + this value causes PgBouncer to restart. The image may also be set using + the RELATED_IMAGE_PGBOUNCER environment variable. + More info: https://kubernetes.io/docs/concepts/containers/images type: string metadata: description: Metadata contains metadata for custom resources @@ -13007,20 +14451,23 @@ spec: anyOf: - type: integer - type: string - description: Minimum number of pods that should be available - at a time. 
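
The customTLSSecret projection above expects PEM-encoded material at the "tls.crt", "tls.key", and "ca.crt" paths, and changing it restarts PgBouncer. A minimal sketch, assuming a pre-created Secret with the hypothetical name hippo-pgbouncer-tls that already stores its data under those keys (the items list is only needed to remap differently named keys):

    spec:
      proxy:
        pgBouncer:
          customTLSSecret:
            name: hippo-pgbouncer-tls   # hypothetical Secret holding tls.crt, tls.key, ca.crt
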
Defaults to one when the replicas field is greater - than one. + description: |- + Minimum number of pods that should be available at a time. + Defaults to one when the replicas field is greater than one. x-kubernetes-int-or-string: true port: default: 5432 - description: Port on which PgBouncer should listen for client - connections. Changing this value causes PgBouncer to restart. + description: |- + Port on which PgBouncer should listen for client connections. Changing + this value causes PgBouncer to restart. format: int32 minimum: 1024 type: integer priorityClassName: - description: 'Priority class name for the pgBouncer pod. Changing - this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgBouncer pod. Changing this value causes + PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string replicas: default: 1 @@ -13029,10 +14476,36 @@ spec: minimum: 0 type: integer resources: - description: 'Compute resources of a PgBouncer container. - Changing this value causes PgBouncer to restart. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers' + description: |- + Compute resources of a PgBouncer container. Changing this value causes + PgBouncer to restart. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -13040,8 +14513,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -13050,16 +14524,28 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object service: description: Specification of the service that exposes PgBouncer. properties: + externalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + internalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string metadata: description: Metadata contains metadata for custom resources properties: @@ -13073,11 +14559,11 @@ spec: type: object type: object nodePort: - description: The port on which this service is exposed - when type is NodePort or LoadBalancer. Value must be - in-range and not in use or the operation will fail. - If unspecified, a port will be allocated if this Service - requires one. - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + description: |- + The port on which this service is exposed when type is NodePort or + LoadBalancer. Value must be in-range and not in use or the operation will + fail. If unspecified, a port will be allocated if this Service requires one. + - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport format: int32 type: integer type: @@ -13099,6 +14585,32 @@ spec: resources: description: Resource requirements for a sidecar container properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -13106,8 +14618,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -13116,198 +14629,229 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. 
More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object type: object type: object tolerations: - description: 'Tolerations of a PgBouncer pod. Changing this - value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of a PgBouncer pod. Changing this value causes PgBouncer to + restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value should - be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. 
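
The toleration fields above follow the standard Kubernetes shape. A sketch that lets PgBouncer pods land on nodes carrying a hypothetical dedicated taint:

    spec:
      proxy:
        pgBouncer:
          tolerations:
          - key: example.com/connection-pooling   # hypothetical taint key
            operator: Exists                      # value must stay empty with Exists
            effect: NoSchedule
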
+ If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array topologySpreadConstraints: - description: 'Topology spread constraints of a PgBouncer pod. - Changing this value causes PgBouncer to restart. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/' + description: |- + Topology spread constraints of a PgBouncer pod. Changing this value causes + PgBouncer to restart. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ items: description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. properties: labelSelector: - description: LabelSelector is used to find matching - pods. Pods that match this label selector are counted - to determine the number of pods in their corresponding - topology domain. + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which - pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between the - number of matching pods in the target topology and - the global minimum. The global minimum is the minimum - number of matching pods in an eligible domain or zero - if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to - 1, and pods with the same labelSelector spread as - 2/2/1: In this case, the global minimum is 1. | zone1 - | zone2 | zone3 | | P P | P P | P | - if MaxSkew - is 1, incoming pod can only be scheduled to zone3 - to become 2/2/2; scheduling it onto zone1(zone2) would - make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto - any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies - that satisfy it. It''s a required field. Default value - is 1 and 0 is not allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number - of eligible domains. When the number of eligible domains - with matching topology keys is less than minDomains, - Pod Topology Spread treats \"global minimum\" as 0, - and then the calculation of Skew is performed. And - when the number of eligible domains with matching - topology keys equals or greater than minDomains, this - value has no effect on scheduling. As a result, when - the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to - those domains. If value is nil, the constraint behaves - as if MinDomains is equal to 1. Valid values are integers - greater than 0. 
When value is not nil, WhenUnsatisfiable - must be DoNotSchedule. \n For example, in a 3-zone - cluster, MaxSkew is set to 2, MinDomains is set to - 5 and pods with the same labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | | P P | P P | P P | - The number of domains is less than 5(MinDomains), - so \"global minimum\" is treated as 0. In this situation, - new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod - is scheduled to any of the three zones, it will violate - MaxSkew. \n This is an alpha field and requires enabling - MinDomainsInPodTopologySpread feature gate." + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. format: int32 type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string topologyKey: - description: TopologyKey is the key of node labels. - Nodes that have a label with this key and identical - values are considered to be in the same topology. - We consider each as a "bucket", and try - to put balanced number of pods into each bucket. We - define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose - nodes match the node selector. e.g. If TopologyKey - is "kubernetes.io/hostname", each Node is a domain - of that topology. 
And, if TopologyKey is "topology.kubernetes.io/zone", - each zone is a domain of that topology. It's a required - field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal - with a pod if it doesn''t satisfy the spread constraint. - - DoNotSchedule (default) tells the scheduler not - to schedule it. - ScheduleAnyway tells the scheduler - to schedule the pod in any location, but giving higher - precedence to topologies that would help reduce the - skew. A constraint is considered "Unsatisfiable" for - an incoming pod if and only if every possible node - assignment for that pod would violate "MaxSkew" on - some topology. For example, in a 3-zone cluster, MaxSkew - is set to 1, and pods with the same labelSelector - spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P - | P | P | If WhenUnsatisfiable is set to DoNotSchedule, - incoming pod can only be scheduled to zone2(zone3) - to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) - satisfies MaxSkew(1). In other words, the cluster - can still be imbalanced, but scheduler won''t make - it *more* imbalanced. It''s a required field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. 
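
Putting the maxSkew/topologyKey/whenUnsatisfiable trio above together: a sketch that spreads PgBouncer pods across zones while still allowing scheduling when balance cannot be achieved. The label selector is an assumption; it should match the labels actually applied to the PgBouncer pods:

    spec:
      proxy:
        pgBouncer:
          topologySpreadConstraints:
          - maxSkew: 1
            topologyKey: topology.kubernetes.io/zone
            whenUnsatisfiable: ScheduleAnyway    # prefer balance over strict placement
            labelSelector:
              matchLabels:
                postgres-operator.crunchydata.com/role: pgbouncer   # assumed pod label
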
type: string required: - maxSkew @@ -13319,10 +14863,67 @@ spec: required: - pgBouncer type: object + replicaService: + description: Specification of the service that exposes PostgreSQL + replica instances + properties: + externalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + internalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + nodePort: + description: |- + The port on which this service is exposed when type is NodePort or + LoadBalancer. Value must be in-range and not in use or the operation will + fail. If unspecified, a port will be allocated if this Service requires one. + - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + type: + default: ClusterIP + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + enum: + - ClusterIP + - NodePort + - LoadBalancer + type: string + type: object service: description: Specification of the service that exposes the PostgreSQL primary instance. properties: + externalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + internalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string metadata: description: Metadata contains metadata for custom resources properties: @@ -13336,10 +14937,11 @@ spec: type: object type: object nodePort: - description: The port on which this service is exposed when type - is NodePort or LoadBalancer. Value must be in-range and not - in use or the operation will fail. If unspecified, a port will - be allocated if this Service requires one. - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + description: |- + The port on which this service is exposed when type is NodePort or + LoadBalancer. Value must be in-range and not in use or the operation will + fail. If unspecified, a port will be allocated if this Service requires one. + - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport format: int32 type: integer type: @@ -13352,10 +14954,11 @@ spec: type: string type: object shutdown: - description: Whether or not the PostgreSQL cluster should be stopped. - When this is true, workloads are scaled to zero and CronJobs are - suspended. Other resources, such as Services and Volumes, remain - in place. + description: |- + Whether or not the PostgreSQL cluster should be stopped. + When this is true, workloads are scaled to zero and CronJobs + are suspended. + Other resources, such as Services and Volumes, remain in place. type: boolean standby: description: Run this cluster as a read-only copy of an existing cluster @@ -13363,9 +14966,10 @@ spec: properties: enabled: default: true - description: Whether or not the PostgreSQL cluster should be read-only. 
- When this is true, WAL files are applied from a pgBackRest repository - or another PostgreSQL server. + description: |- + Whether or not the PostgreSQL cluster should be read-only. When this is + true, WAL files are applied from a pgBackRest repository or another + PostgreSQL server. type: boolean host: description: Network address of the PostgreSQL server to follow @@ -13384,9 +14988,10 @@ spec: type: string type: object supplementalGroups: - description: 'A list of group IDs applied to the process of a container. - These can be useful when accessing shared file systems with constrained - permissions. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context' + description: |- + A list of group IDs applied to the process of a container. These can be + useful when accessing shared file systems with constrained permissions. + More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context items: format: int64 maximum: 2147483647 @@ -13401,31 +15006,30 @@ spec: description: Defines a pgAdmin user interface. properties: affinity: - description: 'Scheduling constraints of a pgAdmin pod. Changing - this value causes pgAdmin to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of a pgAdmin pod. Changing this value causes + pgAdmin to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 (i.e. - it's a no-op). A null preferred scheduling term - matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -13435,79 +15039,72 @@ spec: description: A list of node selector requirements by node's labels. 
items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
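The operator rules described above (a non-empty values array for In/NotIn, an empty one for Exists/DoesNotExist, and exactly one integer-like value for Gt/Lt) read more clearly as a worked matchExpressions list; the label keys below are only illustrative:

# Editor's sketch of the NodeSelectorRequirement rules described above.
matchExpressions:
  - key: topology.kubernetes.io/zone
    operator: In                      # In/NotIn require a non-empty values array
    values: [us-east-1a, us-east-1b]
  - key: node-role.kubernetes.io/control-plane
    operator: DoesNotExist            # Exists/DoesNotExist require an empty values array
  - key: example.com/gpu-count        # hypothetical node label
    operator: Gt                      # Gt/Lt take a single value, interpreted as an integer
    values: ["3"]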
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -13519,105 +15116,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to an update), the system may or may not try - to eventually evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -13625,19 +15217,16 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -13648,18 +15237,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
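Putting the two nodeAffinity halves together: required terms are ORed gates that must hold at scheduling time, while preferred terms only contribute weights. A hedged sketch, assuming pgAdmin affinity lives at spec.userInterface.pgAdmin.affinity as the surrounding hunk suggests; the zone value is hypothetical:

# Editor's sketch: pgAdmin must land on amd64 nodes (ORed required terms),
# and preferably in zone us-east-1a (weights are summed per candidate node).
userInterface:
  pgAdmin:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:                  # terms are ORed
            - matchExpressions:               # requirements within a term are ANDed
                - key: kubernetes.io/arch
                  operator: In
                  values: [amd64]
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 50                        # 1-100
            preference:
              matchExpressions:
                - key: topology.kubernetes.io/zone
                  operator: In
                  values: [us-east-1a]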
properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -13667,60 +15256,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -13728,70 +15339,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". 
+ description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -13799,161 +15401,179 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to a pod label update), the system may or may - not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. 
+ If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". 
+ description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules @@ -13961,19 +15581,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to - the sum if the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -13984,18 +15601,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -14003,60 +15620,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
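matchLabelKeys and mismatchLabelKeys are new alpha fields behind the MatchLabelKeysInPodAffinity feature gate, and their descriptions are dense. A small sketch of the common rollout-isolation use makes matchLabelKeys concrete; mismatchLabelKeys is simply the `key notin (value)` counterpart. The app label is hypothetical, and pod-template-hash assumes a Deployment-managed pod:

# Editor's sketch: with matchLabelKeys, the incoming pod's own value for
# pod-template-hash is appended to the selector as `key in (value)`, so the
# anti-affinity only counts pods from the same Deployment rollout.
# Requires the alpha MatchLabelKeysInPodAffinity feature gate.
podAntiAffinity:
  preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 100
      podAffinityTerm:
        topologyKey: kubernetes.io/hostname
        labelSelector:                  # must be set when matchLabelKeys is used
          matchLabels:
            app: my-app                 # hypothetical label
        matchLabelKeys:
          - pod-template-hash           # must not also appear in labelSelector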
+ items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -14064,70 +15703,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -14135,192 +15765,301 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - anti-affinity requirements specified by this field - cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may - or may not try to eventually evict the pod from - its node. When there are multiple elements, the - lists of nodes corresponding to each podAffinityTerm - are intersected, i.e. all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
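The required (hard) anti-affinity variant deserves one example as well, since every listed term must be satisfied and an empty topologyKey is rejected; the app label is hypothetical:

# Editor's sketch: never co-schedule two pods labeled app=my-app on the same
# node, considering pods in every namespace.
podAntiAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    - topologyKey: kubernetes.io/hostname   # empty topologyKey is not allowed
      labelSelector:
        matchExpressions:
          - key: app
            operator: In
            values: [my-app]
      namespaceSelector: {}                 # empty selector matches all namespaces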
items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object config: - description: Configuration settings for the pgAdmin process. - Changes to any of these values will be loaded without validation. - Be careful, as you may put pgAdmin into an unusable state. + description: |- + Configuration settings for the pgAdmin process. Changes to any of these + values will be loaded without validation. Be careful, as + you may put pgAdmin into an unusable state. properties: files: - description: Files allows the user to mount projected - volumes into the pgAdmin container so that files can - be referenced by pgAdmin as needed. + description: |- + Files allows the user to mount projected volumes into the pgAdmin + container so that files can be referenced by pgAdmin as needed. items: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the ConfigMap, the - volume setup will error unless it is marked - optional. Paths must be relative and may not - contain the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -14329,41 +16068,43 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -14379,7 +16120,7 @@ spec: fieldRef: description: 'Required: Selects a field of the pod: only annotations, labels, - name and namespace are supported.' + name, namespace and uid are supported.' properties: apiVersion: description: Version of the schema @@ -14393,19 +16134,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used - to set permissions on this file, must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -14417,11 +16154,9 @@ spec: must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and - requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are - currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
properties: containerName: description: 'Container name: required @@ -14443,27 +16178,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the Secret, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -14472,69 +16206,68 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience - of the token. A recipient of a token must - identify itself with an identifier specified - in the audience of the token, and otherwise - should reject the token. The audience defaults - to the identifier of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account - token. As the token approaches expiration, - the kubelet volume plugin will proactively - rotate the service account token. The kubelet - will start trying to rotate the token if the - token is older than 80 percent of its time - to live or if the token is older than 24 hours.Defaults - to 1 hour and must be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the - mount point of the file to project the token - into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -14542,15 +16275,22 @@ spec: type: object type: array ldapBindPassword: - description: 'A Secret containing the value for the LDAP_BIND_PASSWORD - setting. More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html' + description: |- + A Secret containing the value for the LDAP_BIND_PASSWORD setting. + More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html properties: key: description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret or its key @@ -14559,37 +16299,43 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic settings: - description: 'Settings for the pgAdmin server process. - Keys should be uppercase and values must be constants. - More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html' + description: |- + Settings for the pgAdmin server process. Keys should be uppercase and + values must be constants. 
+ More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html type: object x-kubernetes-preserve-unknown-fields: true type: object dataVolumeClaimSpec: - description: 'Defines a PersistentVolumeClaim for pgAdmin - data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes' + description: |- + Defines a PersistentVolumeClaim for pgAdmin data. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes properties: accessModes: - description: 'accessModes contains the desired access - modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to specify - either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the provisioner - or an external controller can support the specified - data source, it will create a new volume based on the - contents of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will always have - the same contents as the DataSourceRef field.' + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API group. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. type: string kind: @@ -14602,33 +16348,37 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object from - which to populate the volume with data, if a non-empty - volume is desired. This may be any local object from - a non-empty API group (non core object) or a PersistentVolumeClaim - object. When this field is specified, volume binding - will only succeed if the type of the specified object - matches some installed volume populator or dynamic provisioner. - This field will replace the functionality of the DataSource - field and as such if both fields are non-empty, they - must have the same value. For backwards compatibility, - both fields (DataSource and DataSourceRef) will be set - to the same value automatically if one of them is empty - and the other is non-empty. There are two important - differences between DataSource and DataSourceRef: * - While DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as well as - PersistentVolumeClaim objects. 
* While DataSource ignores - disallowed values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed value - is specified. (Beta) Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API group. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. type: string kind: @@ -14637,17 +16387,23 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name type: object resources: - description: 'resources represents the minimum resources - the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify resource - requirements that are lower than previous value but - must still be higher than capacity recorded in the status - field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -14656,8 +16412,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -14666,11 +16423,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object selector: @@ -14681,8 +16438,8 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: @@ -14690,43 +16447,60 @@ spec: applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of the StorageClass - required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of volume is - required by the claim. Value of Filesystem is implied - when not included in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the @@ -14734,10 +16508,11 @@ spec: type: string type: object image: - description: 'Name of a container image that can run pgAdmin - 4. Changing this value causes pgAdmin to restart. The image - may also be set using the RELATED_IMAGE_PGADMIN environment - variable. More info: https://kubernetes.io/docs/concepts/containers/images' + description: |- + Name of a container image that can run pgAdmin 4. Changing this value causes + pgAdmin to restart. The image may also be set using the RELATED_IMAGE_PGADMIN + environment variable. + More info: https://kubernetes.io/docs/concepts/containers/images type: string metadata: description: Metadata contains metadata for custom resources @@ -14752,8 +16527,10 @@ spec: type: object type: object priorityClassName: - description: 'Priority class name for the pgAdmin pod. Changing - this value causes pgAdmin to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgAdmin pod. Changing this value causes pgAdmin + to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string replicas: default: 1 @@ -14763,9 +16540,36 @@ spec: minimum: 0 type: integer resources: - description: 'Compute resources of a pgAdmin container. Changing - this value causes pgAdmin to restart. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers' + description: |- + Compute resources of a pgAdmin container. Changing this value causes + pgAdmin to restart. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -14773,8 +16577,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -14783,16 +16588,28 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object service: description: Specification of the service that exposes pgAdmin. properties: + externalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string + internalTrafficPolicy: + description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' + enum: + - Cluster + - Local + type: string metadata: description: Metadata contains metadata for custom resources properties: @@ -14806,11 +16623,11 @@ spec: type: object type: object nodePort: - description: The port on which this service is exposed - when type is NodePort or LoadBalancer. Value must be - in-range and not in use or the operation will fail. - If unspecified, a port will be allocated if this Service - requires one. 
- https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + description: |- + The port on which this service is exposed when type is NodePort or + LoadBalancer. Value must be in-range and not in use or the operation will + fail. If unspecified, a port will be allocated if this Service requires one. + - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport format: int32 type: integer type: @@ -14823,187 +16640,218 @@ spec: type: string type: object tolerations: - description: 'Tolerations of a pgAdmin pod. Changing this - value causes pgAdmin to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of a pgAdmin pod. Changing this value causes pgAdmin to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value should - be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. 
+ If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array topologySpreadConstraints: - description: 'Topology spread constraints of a pgAdmin pod. - Changing this value causes pgAdmin to restart. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/' + description: |- + Topology spread constraints of a pgAdmin pod. Changing this value causes + pgAdmin to restart. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ items: description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. properties: labelSelector: - description: LabelSelector is used to find matching - pods. Pods that match this label selector are counted - to determine the number of pods in their corresponding - topology domain. + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which - pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between the - number of matching pods in the target topology and - the global minimum. The global minimum is the minimum - number of matching pods in an eligible domain or zero - if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to - 1, and pods with the same labelSelector spread as - 2/2/1: In this case, the global minimum is 1. | zone1 - | zone2 | zone3 | | P P | P P | P | - if MaxSkew - is 1, incoming pod can only be scheduled to zone3 - to become 2/2/2; scheduling it onto zone1(zone2) would - make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto - any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies - that satisfy it. It''s a required field. Default value - is 1 and 0 is not allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number - of eligible domains. When the number of eligible domains - with matching topology keys is less than minDomains, - Pod Topology Spread treats \"global minimum\" as 0, - and then the calculation of Skew is performed. And - when the number of eligible domains with matching - topology keys equals or greater than minDomains, this - value has no effect on scheduling. As a result, when - the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to - those domains. If value is nil, the constraint behaves - as if MinDomains is equal to 1. Valid values are integers - greater than 0. 
When value is not nil, WhenUnsatisfiable - must be DoNotSchedule. \n For example, in a 3-zone - cluster, MaxSkew is set to 2, MinDomains is set to - 5 and pods with the same labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | | P P | P P | P P | - The number of domains is less than 5(MinDomains), - so \"global minimum\" is treated as 0. In this situation, - new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod - is scheduled to any of the three zones, it will violate - MaxSkew. \n This is an alpha field and requires enabling - MinDomainsInPodTopologySpread feature gate." + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. format: int32 type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string topologyKey: - description: TopologyKey is the key of node labels. - Nodes that have a label with this key and identical - values are considered to be in the same topology. - We consider each as a "bucket", and try - to put balanced number of pods into each bucket. We - define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose - nodes match the node selector. e.g. If TopologyKey - is "kubernetes.io/hostname", each Node is a domain - of that topology. 
And, if TopologyKey is "topology.kubernetes.io/zone", - each zone is a domain of that topology. It's a required - field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal - with a pod if it doesn''t satisfy the spread constraint. - - DoNotSchedule (default) tells the scheduler not - to schedule it. - ScheduleAnyway tells the scheduler - to schedule the pod in any location, but giving higher - precedence to topologies that would help reduce the - skew. A constraint is considered "Unsatisfiable" for - an incoming pod if and only if every possible node - assignment for that pod would violate "MaxSkew" on - some topology. For example, in a 3-zone cluster, MaxSkew - is set to 1, and pods with the same labelSelector - spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P - | P | P | If WhenUnsatisfiable is set to DoNotSchedule, - incoming pod can only be scheduled to zone2(zone3) - to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) - satisfies MaxSkew(1). In other words, the cluster - can still be imbalanced, but scheduler won''t make - it *more* imbalanced. It''s a required field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. type: string required: - maxSkew @@ -15018,48 +16866,58 @@ spec: - pgAdmin type: object users: - description: Users to create inside PostgreSQL and the databases they - should access. The default creates one user that can access one - database matching the PostgresCluster name. An empty list creates - no users. Removing a user from this list does NOT drop the user - nor revoke their access. + description: |- + Users to create inside PostgreSQL and the databases they should access. + The default creates one user that can access one database matching the + PostgresCluster name. An empty list creates no users. Removing a user + from this list does NOT drop the user nor revoke their access. 
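As a minimal sketch of the tolerations and topologySpreadConstraints fields documented in the hunks above (a fragment of a PostgresCluster spec; the cluster name, taint key, and label value are hypothetical, and postgres-operator.crunchydata.com/cluster is assumed to be the label the operator applies to pgAdmin pods):

  userInterface:
    pgAdmin:
      dataVolumeClaimSpec:
        accessModes: [ReadWriteOnce]
        resources:
          requests:
            storage: 1Gi
      tolerations:
        # Tolerate a hypothetical taint on nodes dedicated to UI workloads.
        - key: example.com/dedicated
          operator: Equal
          value: pgadmin
          effect: NoSchedule
      topologySpreadConstraints:
        # Spread pgAdmin pods across zones; ScheduleAnyway keeps this a soft constraint.
        - maxSkew: 1
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: ScheduleAnyway
          labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/cluster: hippo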
items: properties: databases: - description: Databases to which this user can connect and create - objects. Removing a database from this list does NOT revoke - access. This field is ignored for the "postgres" user. + description: |- + Databases to which this user can connect and create objects. Removing a + database from this list does NOT revoke access. This field is ignored for + the "postgres" user. items: - description: 'PostgreSQL identifiers are limited in length - but may contain any character. More info: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS' + description: |- + PostgreSQL identifiers are limited in length but may contain any character. + More info: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS maxLength: 63 minLength: 1 type: string type: array x-kubernetes-list-type: set name: - description: The name of this PostgreSQL user. The value may - contain only lowercase letters, numbers, and hyphen so that - it fits into Kubernetes metadata. + description: |- + The name of this PostgreSQL user. The value may contain only lowercase + letters, numbers, and hyphen so that it fits into Kubernetes metadata. maxLength: 63 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ type: string options: - description: 'ALTER ROLE options except for PASSWORD. This field - is ignored for the "postgres" user. More info: https://www.postgresql.org/docs/current/role-attributes.html' + description: |- + ALTER ROLE options except for PASSWORD. This field is ignored for the + "postgres" user. + More info: https://www.postgresql.org/docs/current/role-attributes.html + maxLength: 200 pattern: ^[^;]*$ type: string + x-kubernetes-validations: + - message: cannot assign password + rule: '!self.matches("(?i:PASSWORD)")' + - message: cannot contain comments + rule: '!self.matches("(?:--|/[*]|[*]/)")' password: description: Properties of the password generated for this user. properties: type: default: ASCII - description: Type of password to generate. Defaults to ASCII. - Valid options are ASCII and AlphaNumeric. "ASCII" passwords - contain letters, numbers, and symbols from the US-ASCII - character set. "AlphaNumeric" passwords contain letters - and numbers from the US-ASCII character set. + description: |- + Type of password to generate. Defaults to ASCII. Valid options are ASCII + and AlphaNumeric. + "ASCII" passwords contain letters, numbers, and symbols from the US-ASCII character set. + "AlphaNumeric" passwords contain letters and numbers from the US-ASCII character set. enum: - ASCII - AlphaNumeric @@ -15070,12 +16928,12 @@ spec: required: - name type: object + maxItems: 64 type: array x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map required: - - backups - instances - postgresVersion type: object @@ -15083,40 +16941,40 @@ spec: description: PostgresClusterStatus defines the observed state of PostgresCluster properties: conditions: - description: 'conditions represent the observations of postgrescluster''s - current state. Known .status.conditions.type are: "PersistentVolumeResizing", - "Progressing", "ProxyAvailable"' + description: |- + conditions represent the observations of postgrescluster's current state. + Known .status.conditions.type are: "PersistentVolumeResizing", + "Progressing", "ProxyAvailable" items: description: Condition contains details for one aspect of the current state of this API Resource. 
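The users schema above now caps the list at 64 entries and adds CEL validations on options that reject PASSWORD assignments and SQL comments. A minimal sketch of a conforming entry (a fragment of a PostgresCluster spec; the user and database names are hypothetical):

  users:
    - name: app-user
      databases: [appdb]
      # Role attributes only; the new CEL rules reject PASSWORD and comment tokens.
      options: NOSUPERUSER CREATEDB
      password:
        type: AlphaNumeric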
properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -15130,7 +16988,7 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase. + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -15157,6 +17015,11 @@ spec: description: Current state of PostgreSQL instances. items: properties: + desiredPGDataVolume: + additionalProperties: + type: string + description: Desired Size of the pgData volume + type: object name: type: string readyReplicas: @@ -15215,11 +17078,10 @@ spec: format: int32 type: integer completionTime: - description: Represents the time the manual backup Job was - determined by the Job controller to be completed. This - field is only set if the backup completed successfully. - Additionally, it is represented in RFC3339 form and is in - UTC. + description: |- + Represents the time the manual backup Job was determined by the Job controller + to be completed. This field is only set if the backup completed successfully. + Additionally, it is represented in RFC3339 form and is in UTC. 
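The manual backup status tracked here is populated after a one-off pgBackRest backup is requested. A sketch of how such a backup is typically triggered, assuming the fully qualified pgbackrest-backup annotation key used by PGO v5 and the spec.backups.pgbackrest.manual fields (the annotation value is what surfaces as the id field described below):

metadata:
  annotations:
    # Assumed annotation key; its value is reported back as status.pgbackrest.manualBackup.id.
    postgres-operator.crunchydata.com/pgbackrest-backup: manual-1
spec:
  backups:
    pgbackrest:
      manual:
        repoName: repo1
        options: ["--type=full"]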
format: date-time type: string failed: @@ -15228,18 +17090,19 @@ spec: format: int32 type: integer finished: - description: Specifies whether or not the Job is finished - executing (does not indicate success or failure). + description: |- + Specifies whether or not the Job is finished executing (does not indicate success or + failure). type: boolean id: - description: A unique identifier for the manual backup as - provided using the "pgbackrest-backup" annotation when initiating - a backup. + description: |- + A unique identifier for the manual backup as provided using the "pgbackrest-backup" + annotation when initiating a backup. type: string startTime: - description: Represents the time the manual backup Job was - acknowledged by the Job controller. It is represented in - RFC3339 form and is in UTC. + description: |- + Represents the time the manual backup Job was acknowledged by the Job controller. + It is represented in RFC3339 form and is in UTC. format: date-time type: string succeeded: @@ -15256,16 +17119,19 @@ spec: host properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string ready: description: Whether or not the pgBackRest repository host @@ -15285,14 +17151,14 @@ spec: description: The name of the pgBackRest repository type: string replicaCreateBackupComplete: - description: ReplicaCreateBackupReady indicates whether - a backup exists in the repository as needed to bootstrap - replicas. + description: |- + ReplicaCreateBackupReady indicates whether a backup exists in the repository as needed + to bootstrap replicas. type: boolean repoOptionsHash: - description: A hash of the required fields in the spec for - defining an Azure, GCS or S3 repository, Utilizd to detect - changes to these fields and then execute pgBackRest stanza-create + description: |- + A hash of the required fields in the spec for defining an Azure, GCS or S3 repository, + Utilized to detect changes to these fields and then execute pgBackRest stanza-create commands accordingly. type: string stanzaCreated: @@ -15319,11 +17185,10 @@ spec: format: int32 type: integer completionTime: - description: Represents the time the manual backup Job was - determined by the Job controller to be completed. This - field is only set if the backup completed successfully. 
- Additionally, it is represented in RFC3339 form and is in - UTC. + description: |- + Represents the time the manual backup Job was determined by the Job controller + to be completed. This field is only set if the backup completed successfully. + Additionally, it is represented in RFC3339 form and is in UTC. format: date-time type: string failed: @@ -15332,18 +17197,19 @@ spec: format: int32 type: integer finished: - description: Specifies whether or not the Job is finished - executing (does not indicate success or failure). + description: |- + Specifies whether or not the Job is finished executing (does not indicate success or + failure). type: boolean id: - description: A unique identifier for the manual backup as - provided using the "pgbackrest-backup" annotation when initiating - a backup. + description: |- + A unique identifier for the manual backup as provided using the "pgbackrest-backup" + annotation when initiating a backup. type: string startTime: - description: Represents the time the manual backup Job was - acknowledged by the Job controller. It is represented in - RFC3339 form and is in UTC. + description: |- + Represents the time the manual backup Job was acknowledged by the Job controller. + It is represented in RFC3339 form and is in UTC. format: date-time type: string succeeded: @@ -15365,11 +17231,10 @@ spec: format: int32 type: integer completionTime: - description: Represents the time the manual backup Job was - determined by the Job controller to be completed. This - field is only set if the backup completed successfully. - Additionally, it is represented in RFC3339 form and is - in UTC. + description: |- + Represents the time the manual backup Job was determined by the Job controller + to be completed. This field is only set if the backup completed successfully. + Additionally, it is represented in RFC3339 form and is in UTC. format: date-time type: string cronJobName: @@ -15385,9 +17250,9 @@ spec: description: The name of the associated pgBackRest repository type: string startTime: - description: Represents the time the manual backup Job was - acknowledged by the Job controller. It is represented - in RFC3339 form and is in UTC. + description: |- + Represents the time the manual backup Job was acknowledged by the Job controller. + It is represented in RFC3339 form and is in UTC. format: date-time type: string succeeded: @@ -15402,8 +17267,9 @@ spec: type: array type: object postgresVersion: - description: Stores the current PostgreSQL major version following - a successful major PostgreSQL upgrade. + description: |- + Stores the current PostgreSQL major version following a successful + major PostgreSQL upgrade. type: integer proxy: description: Current state of the PostgreSQL proxy. @@ -15411,8 +17277,9 @@ spec: pgBouncer: properties: postgresRevision: - description: Identifies the revision of PgBouncer assets that - have been installed into PostgreSQL. + description: |- + Identifies the revision of PgBouncer assets that have been installed into + PostgreSQL. type: string readyReplicas: description: Total number of ready pods. @@ -15425,22 +17292,19 @@ spec: type: object type: object registrationRequired: - description: Version information for installations with a registration - requirement. properties: pgoVersion: type: string type: object startupInstance: - description: The instance that should be started first when bootstrapping - and/or starting a PostgresCluster. 
+ description: |- + The instance that should be started first when bootstrapping and/or starting a + PostgresCluster. type: string startupInstanceSet: description: The instance set associated with the startupInstance type: string tokenRequired: - description: Signals the need for a token to be applied when registration - is required. type: string userInterface: description: Current state of the PostgreSQL user interface. diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 2509f42fe5..85b7cbdf29 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -1,7 +1,17 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: +- bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml - bases/postgres-operator.crunchydata.com_postgresclusters.yaml - bases/postgres-operator.crunchydata.com_pgupgrades.yaml - bases/postgres-operator.crunchydata.com_pgadmins.yaml + +patches: +- target: + kind: CustomResourceDefinition + patch: |- + - op: add + path: /metadata/labels + value: + app.kubernetes.io/name: pgo + app.kubernetes.io/version: latest diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 82b2310ca0..7001380693 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -11,7 +11,7 @@ labels: resources: - ../crd -- ../rbac/cluster +- ../rbac - ../manager images: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index addbd49afa..2eb849e138 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -12,36 +12,38 @@ spec: - name: operator image: postgres-operator env: + - name: PGO_INSTALLER + value: kustomize + - name: PGO_INSTALLER_ORIGIN + value: postgres-operator-repo - name: PGO_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: CRUNCHY_DEBUG value: "true" - - name: RELATED_IMAGE_POSTGRES_14 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.9-0" - - name: RELATED_IMAGE_POSTGRES_14_GIS_3.1 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-14.9-3.1-0" - - name: RELATED_IMAGE_POSTGRES_14_GIS_3.2 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-14.9-3.2-0" - - name: RELATED_IMAGE_POSTGRES_14_GIS_3.3 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-14.9-3.3-0" - - name: RELATED_IMAGE_POSTGRES_15 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.4-0" - - name: RELATED_IMAGE_POSTGRES_15_GIS_3.3 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-15.4-3.3-0" + - name: RELATED_IMAGE_POSTGRES_16 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2" + - name: RELATED_IMAGE_POSTGRES_16_GIS_3.3 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.3-2" + - name: RELATED_IMAGE_POSTGRES_16_GIS_3.4 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.4-2" + - name: RELATED_IMAGE_POSTGRES_17 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-17.0-0" + - name: RELATED_IMAGE_POSTGRES_17_GIS_3.4 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-17.0-3.4-0" - name: RELATED_IMAGE_PGADMIN - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-17" + value: 
"registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-31" - name: RELATED_IMAGE_PGBACKREST - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.47-0" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.53.1-0" - name: RELATED_IMAGE_PGBOUNCER - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.19-4" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.23-0" - name: RELATED_IMAGE_PGEXPORTER value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest" - name: RELATED_IMAGE_PGUPGRADE value: "registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest" - name: RELATED_IMAGE_STANDALONE_PGADMIN - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-7.7-0" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-8.12-0" securityContext: allowPrivilegeEscalation: false capabilities: { drop: [ALL] } diff --git a/config/rbac/.gitignore b/config/rbac/.gitignore deleted file mode 100644 index 2ad5901955..0000000000 --- a/config/rbac/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/role.yaml diff --git a/config/rbac/cluster/kustomization.yaml b/config/rbac/kustomization.yaml similarity index 100% rename from config/rbac/cluster/kustomization.yaml rename to config/rbac/kustomization.yaml diff --git a/config/rbac/namespace/kustomization.yaml b/config/rbac/namespace/kustomization.yaml deleted file mode 100644 index 82cfb0841b..0000000000 --- a/config/rbac/namespace/kustomization.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- service_account.yaml -- role.yaml -- role_binding.yaml diff --git a/config/rbac/namespace/role.yaml b/config/rbac/namespace/role.yaml deleted file mode 100644 index 90bc3b9dbb..0000000000 --- a/config/rbac/namespace/role.yaml +++ /dev/null @@ -1,146 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: postgres-operator -rules: -- apiGroups: - - '' - resources: - - configmaps - - persistentvolumeclaims - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - '' - resources: - - endpoints - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - watch -- apiGroups: - - '' - resources: - - endpoints/restricted - - pods/exec - verbs: - - create -- apiGroups: - - '' - resources: - - events - verbs: - - create - - patch -- apiGroups: - - '' - resources: - - pods - verbs: - - delete - - get - - list - - patch - - watch -- apiGroups: - - '' - resources: - - serviceaccounts - verbs: - - create - - get - - list - - patch - - watch -- apiGroups: - - apps - resources: - - deployments - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - batch - resources: - - cronjobs - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - pgadmins - - pgupgrades - verbs: - - get - - list - - watch -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - pgadmins/finalizers - - pgupgrades/finalizers - - postgresclusters/finalizers - verbs: - - update -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - pgadmins/status - 
- pgupgrades/status - - postgresclusters/status - verbs: - - patch -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - postgresclusters - verbs: - - get - - list - - patch - - watch -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - - roles - verbs: - - create - - get - - list - - patch - - watch diff --git a/config/rbac/namespace/role_binding.yaml b/config/rbac/namespace/role_binding.yaml deleted file mode 100644 index d7c16c8a5b..0000000000 --- a/config/rbac/namespace/role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: postgres-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: postgres-operator -subjects: -- kind: ServiceAccount - name: pgo diff --git a/config/rbac/namespace/service_account.yaml b/config/rbac/namespace/service_account.yaml deleted file mode 100644 index 364f797171..0000000000 --- a/config/rbac/namespace/service_account.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: pgo diff --git a/config/rbac/cluster/role.yaml b/config/rbac/role.yaml similarity index 77% rename from config/rbac/cluster/role.yaml rename to config/rbac/role.yaml index ac454385cf..d5783d00b1 100644 --- a/config/rbac/cluster/role.yaml +++ b/config/rbac/role.yaml @@ -5,11 +5,12 @@ metadata: name: postgres-operator rules: - apiGroups: - - '' + - "" resources: - configmaps - persistentvolumeclaims - secrets + - serviceaccounts - services verbs: - create @@ -19,7 +20,7 @@ rules: - patch - watch - apiGroups: - - '' + - "" resources: - endpoints verbs: @@ -31,21 +32,21 @@ rules: - patch - watch - apiGroups: - - '' + - "" resources: - endpoints/restricted - pods/exec verbs: - create - apiGroups: - - '' + - "" resources: - events verbs: - create - patch - apiGroups: - - '' + - "" resources: - pods verbs: @@ -54,16 +55,6 @@ rules: - list - patch - watch -- apiGroups: - - '' - resources: - - serviceaccounts - verbs: - - create - - get - - list - - patch - - watch - apiGroups: - apps resources: @@ -88,6 +79,15 @@ rules: - list - patch - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + - watch - apiGroups: - policy resources: @@ -99,6 +99,24 @@ rules: - list - patch - watch +- apiGroups: + - postgres-operator.crunchydata.com + resources: + - crunchybridgeclusters + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - postgres-operator.crunchydata.com + resources: + - crunchybridgeclusters/finalizers + - crunchybridgeclusters/status + verbs: + - patch + - update - apiGroups: - postgres-operator.crunchydata.com resources: @@ -140,6 +158,18 @@ rules: - roles verbs: - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - delete - get - list - patch diff --git a/config/rbac/cluster/role_binding.yaml b/config/rbac/role_binding.yaml similarity index 100% rename from config/rbac/cluster/role_binding.yaml rename to config/rbac/role_binding.yaml diff --git a/config/rbac/cluster/service_account.yaml b/config/rbac/service_account.yaml similarity index 100% rename from config/rbac/cluster/service_account.yaml rename to config/rbac/service_account.yaml diff --git a/config/singlenamespace/kustomization.yaml b/config/singlenamespace/kustomization.yaml deleted file mode 100644 index a6dc8de538..0000000000 --- a/config/singlenamespace/kustomization.yaml +++ 
/dev/null @@ -1,22 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: postgres-operator - -labels: -- includeSelectors: true - pairs: - postgres-operator.crunchydata.com/control-plane: postgres-operator - -resources: -- ../crd -- ../rbac/namespace -- ../manager - -images: -- name: postgres-operator - newName: registry.developers.crunchydata.com/crunchydata/postgres-operator - newTag: latest - -patches: -- path: manager-target.yaml diff --git a/config/singlenamespace/manager-target.yaml b/config/singlenamespace/manager-target.yaml deleted file mode 100644 index 949250e264..0000000000 --- a/config/singlenamespace/manager-target.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pgo -spec: - template: - spec: - containers: - - name: operator - env: - - name: PGO_TARGET_NAMESPACE - valueFrom: { fieldRef: { apiVersion: v1, fieldPath: metadata.namespace } } diff --git a/docs/archetypes/default.md b/docs/archetypes/default.md deleted file mode 100644 index 00e77bd79b..0000000000 --- a/docs/archetypes/default.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "{{ replace .Name "-" " " | title }}" -date: {{ .Date }} -draft: true ---- - diff --git a/docs/config.toml b/docs/config.toml deleted file mode 100644 index f8f3fa533a..0000000000 --- a/docs/config.toml +++ /dev/null @@ -1,94 +0,0 @@ -baseURL= "" - -languageCode = "en-us" -DefaultContentLanguage = "en" -title = "PGO, the Postgres Operator from Crunchy Data" -theme = "crunchy-hugo-theme" -pygmentsCodeFences = true -pygmentsStyle = "monokailight" -publishDir = "" -canonifyurls = true -relativeURLs = true - -defaultContentLanguage = "en" -defaultContentLanguageInSubdir= false -enableMissingTranslationPlaceholders = false - -[params] -editURL = "https://github.com/CrunchyData/postgres-operator/edit/master/docs/content/" -showVisitedLinks = false # default is false -themeStyle = "flex" # "original" or "flex" # default "flex" -themeVariant = "" # choose theme variant "green", "gold" , "gray", "blue" (default) -ordersectionsby = "weight" # ordersectionsby = "title" -disableHomeIcon = true # default is false -disableSearch = false # default is false -disableNavChevron = false # set true to hide next/prev chevron, default is false -highlightClientSide = false # set true to use highlight.pack.js instead of the default hugo chroma highlighter -menushortcutsnewtab = true # set true to open shortcuts links to a new tab/window -enableGitInfo = true -operatorVersion = "5.3.1" -operatorVersionLatestRel5_0 = "5.0.8" -imageCrunchyPostgres = "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.2-0" -imageCrunchyPostgresPrivate = "registry.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.2-0" -imageCrunchyPGBackrest = "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.41-4" -imageCrunchyPGBackrestPrivate = "registry.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.41-4" -imageCrunchyPGBouncer = "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.18-0" -imageCrunchyExporter = "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-5.3.1-0" -imageCrunchyPGAdmin = "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-10" -imageCrunchyPGUpgrade = "registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi8-5.3.1-0" -operatorRepository = "registry.developers.crunchydata.com/crunchydata/postgres-operator" -operatorRepositoryPrivate = 
"registry.crunchydata.com/crunchydata/postgres-operator" -postgresOperatorTag = "ubi8-5.3.1-0" -PGBouncerComponentTagUbi8 = "ubi8-1.18-0" -PGBouncerTagUbi8 = "ubi8-5.3.1-0" -postgres14GIS32ComponentTagUbi8 = "ubi8-14.7-3.2-0" -postgres14GIS32TagUbi8 = "ubi8-14.7-3.2-5.3.1-0" -postgres14GIS31ComponentTagUbi8 = "ubi8-14.7-3.1-0" -postgres14GIS31TagUbi8 = "ubi8-14.7-3.1-5.3.1-0" -fromPostgresVersion = "14" -postgresVersion = "15" -postgresVersion15 = "15.2" -postgresVersion14 = "14.7" -postgresVersion13 = "13.10" -postgresVersion12 = "12.14" -postgresVersion11 = "11.19" -operatorHelmRepository = "oci://registry.developers.crunchydata.com/crunchydata/pgo" - -[outputs] -home = [ "HTML", "RSS", "JSON"] - -[[menu.shortcuts]] -name = "" -url = "/" -weight = 1 - -[[menu.shortcuts]] -name = " " -url = "https://github.com/CrunchyData/postgres-operator" -weight = 10 - -[[menu.shortcuts]] -name = " " -identifier = "kubedoc" -url = "https://kubernetes.io/docs/" -weight = 20 - -[[menu.shortcuts]] -name = " " -url = "https://github.com/CrunchyData/postgres-operator/blob/master/LICENSE.md" -weight = 30 - -[[menu.downloads]] -name = " " -url = "/pdf/postgres_operator.pdf" -weight = 20 - -[[menu.downloads]] -name = " " -url = "/epub/postgres_operator.epub" -weight = 30 - -[markup] - [markup.goldmark] - [markup.goldmark.renderer] - unsafe = true diff --git a/docs/content/_index.md b/docs/content/_index.md deleted file mode 100644 index 077bcf0c6c..0000000000 --- a/docs/content/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: "PGO, the Postgres Operator from Crunchy Data" -date: -draft: false ---- - -# PGO, the Postgres Operator from Crunchy Data - - PGO: The Postgres Operator from Crunchy Data - -Latest Release: {{< param operatorVersion >}} - -# Production Postgres Made Easy - -[PGO](https://github.com/CrunchyData/postgres-operator), the [Postgres Operator]((https://github.com/CrunchyData/postgres-operator)) from [Crunchy Data](https://www.crunchydata.com), gives you a **declarative Postgres** solution that automatically manages your [PostgreSQL](https://www.postgresql.org) clusters. - -Designed for your GitOps workflows, it is [easy to get started]({{< relref "quickstart/_index.md" >}}) with Postgres on Kubernetes with PGO. Within a few moments, you can have a production grade Postgres cluster complete with high availability, disaster recovery, and monitoring, all over secure TLS communications.Even better, PGO lets you easily customize your Postgres cluster to tailor it to your workload! - -With conveniences like cloning Postgres clusters to using rolling updates to roll out disruptive changes with minimal downtime, PGO is ready to support your Postgres data at every stage of your release pipeline. Built for resiliency and uptime, PGO will keep your desired Postgres in a desired state so you do not need to worry about it. - -PGO is developed with many years of production experience in automating Postgres management on Kubernetes, providing a seamless cloud native Postgres solution to keep your data always available. - -## Supported Platforms - -PGO, the Postgres Operator from Crunchy Data, is tested on the following platforms: - -- Kubernetes 1.22-1.25 -- OpenShift 4.8-4.11 -- Rancher -- Google Kubernetes Engine (GKE), including Anthos -- Amazon EKS -- Microsoft AKS -- VMware Tanzu - -This list only includes the platforms that the Postgres Operator is specifically -tested on as part of the release process. 
PGO works on other -[CNCF Certified Kubernetes](https://www.cncf.io/certification/software-conformance/) -distributions as well. - -The PGO Postgres Operator project source code is available subject to the [Apache 2.0 license](https://raw.githubusercontent.com/CrunchyData/postgres-operator/master/LICENSE.md) with the PGO logo and branding assets covered by [our trademark guidelines](/logos/TRADEMARKS.md). diff --git a/docs/content/architecture/_index.md b/docs/content/architecture/_index.md deleted file mode 100644 index 452f695c33..0000000000 --- a/docs/content/architecture/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "Architecture" -date: -draft: false -weight: 40 ---- diff --git a/docs/content/architecture/backups.md b/docs/content/architecture/backups.md deleted file mode 100644 index 02e5f80883..0000000000 --- a/docs/content/architecture/backups.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: "Backup Management" -date: -draft: false -weight: 120 ---- - -When using the PostgreSQL Operator, the answer to the question "do you take -backups of your database" is automatically "yes!" - -The PostgreSQL Operator uses the open source -[pgBackRest](https://pgbackrest.org) backup and restore utility that is designed -for working with databases that are many terabytes in size. As described in the -[tutorial]({{< relref "/tutorial/backups.md" >}}), pgBackRest is enabled by -default as it permits the PostgreSQL Operator to automate some advanced as well -as convenient behaviors, including: - -- Efficient provisioning of new replicas that are added to the PostgreSQL -cluster -- Preventing replicas from falling out of sync from the PostgreSQL primary by -allowing them to replay old WAL logs -- Allowing failed primaries to automatically and efficiently heal using the -"delta restore" feature -- Serving as the basis for the cluster cloning feature -- ...and of course, allowing for one to take full, differential, and incremental -backups and perform full and point-in-time restores - -Below is one example of how PGO manages backups with both a local storage and a Amazon S3 configuration. - -![PostgreSQL Operator pgBackRest Integration](/images/postgresql-cluster-dr-base.png) - -The PostgreSQL Operator leverages a pgBackRest repository to facilitate the -usage of the pgBackRest features in a PostgreSQL cluster. When a new PostgreSQL -cluster is created, it simultaneously creates a pgBackRest repository. - -You can store your pgBackRest backups in up to four different locations and using four different storage types: - -- Any Kubernetes supported storage class -- Amazon S3 (or S3 equivalents like MinIO) -- Google Cloud Storage (GCS) -- Azure Blob Storage - -PostgreSQL is automatically configured to use the `pgbackrest archive-push` command -to archive the write-ahead log (WAL) in all repositories. - -## Backups - -PGO supports three types of pgBackRest backups: - -- Full: A full backup of all the contents of the PostgreSQL cluster -- Differential: A backup of only the files that have changed since the last full backup -- Incremental: A backup of only the files that have changed since the last full, differential, or incremental backup - -## Scheduling Backups - -Any effective disaster recovery strategy includes having regularly scheduled -backups. PGO enables this by managing a series of Kubernetes CronJobs to ensure that backups are executed at scheduled times. - -Note that pgBackRest presently only supports taking one backup at a time. 
This may change in a future release, but for the time being we suggest that you stagger your backup times. - -Please see the [backup management tutorial]({{< relref "/tutorial/backup-management.md" >}}) for how to set up backup schedules -and configure retention policies. - -## Restores - -The PostgreSQL Operator supports the ability to perform a full restore on a -PostgreSQL cluster as well as a point-in-time-recovery. There are two types of -ways to restore a cluster: - -- Restore to a new cluster -- Restore in-place - -For examples of this, please see the [disaster recovery tutorial]({{< relref "/tutorial/disaster-recovery.md" >}}) - -## Deleting a Backup - -{{% notice warning %}} -If you delete a backup that is *not* set to expire, you may be unable to meet -your retention requirements. If you are deleting backups to free space, it is -recommended to delete your oldest backups first. -{{% /notice %}} - -A backup can be deleted by running the [`pgbackrest expire`](https://pgbackrest.org/command.html#command-expire) command directly on the pgBackRest repository Pod or a Postgres instance. diff --git a/docs/content/architecture/disaster-recovery.md b/docs/content/architecture/disaster-recovery.md deleted file mode 100644 index 70b9a241e8..0000000000 --- a/docs/content/architecture/disaster-recovery.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: "Disaster Recovery" -date: -draft: false -weight: 140 ---- - -Advanced high-availability and disaster recovery strategies involve spreading -your database clusters across multiple data centers to help maximize uptime. -In Kubernetes, this technique is known as "[federation](https://en.wikipedia.org/wiki/Federation_(information_technology))". -Federated Kubernetes clusters can communicate with each other, -coordinate changes, and provide resiliency for applications that have high -uptime requirements. - -As of this writing, federation in Kubernetes is still in ongoing development -and is something we monitor with intense interest. As Kubernetes federation -continues to mature, we wanted to provide a way to deploy PostgreSQL clusters -managed by the [PostgreSQL Operator](https://www.crunchydata.com/developers/download-postgres/containers/postgres-operator) -that can span multiple Kubernetes clusters. - -At a high-level, the PostgreSQL Operator follows the "active-standby" data -center deployment model for managing the PostgreSQL clusters across Kubernetes -clusters. In one Kubernetes cluster, the PostgreSQL Operator deploys PostgreSQL as an -"active" PostgreSQL cluster, which means it has one primary and one-or-more -replicas. In another Kubernetes cluster, the PostgreSQL cluster is deployed as -a "standby" cluster: every PostgreSQL instance is a replica. - -A side-effect of this is that in each of the Kubernetes clusters, the PostgreSQL -Operator can be used to deploy both active and standby PostgreSQL clusters, -allowing you to mix and match! While the mixing and matching may not be ideal for -how you deploy your PostgreSQL clusters, it does allow you to perform online -moves of your PostgreSQL data to different Kubernetes clusters as well as manual -online upgrades. - -Lastly, while this feature does extend high-availability, promoting a standby -cluster to an active cluster is **not** automatic. While the PostgreSQL clusters -within a Kubernetes cluster support self-managed high-availability, a -cross-cluster deployment requires someone to promote the cluster -from standby to active. 
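Before digging into the overview below, here is a minimal, hedged sketch of what such a standby cluster can look like. Everything concrete in it is an assumption for illustration: the cluster name `hippo-standby`, the `pgo-s3-creds` Secret, and the bucket details, and the shared `repo1` must already hold backups pushed by the active cluster.

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo-standby              # hypothetical name
spec:
  postgresVersion: 16
  instances:
    - name: instance1
      replicas: 2
      dataVolumeClaimSpec:
        accessModes: ["ReadWriteOnce"]
        resources: { requests: { storage: 1Gi } }
  backups:
    pgbackrest:
      configuration:
        - secret:
            name: pgo-s3-creds     # assumed Secret carrying the S3 credentials
      global:
        # must match the path the active cluster pushes backups and WAL to
        repo1-path: /pgbackrest/postgres-operator/hippo/repo1
      repos:
        - name: repo1
          s3:
            bucket: my-bucket                    # assumed bucket/endpoint/region
            endpoint: s3.us-east-1.amazonaws.com
            region: us-east-1
  standby:
    enabled: true
    repoName: repo1                # bootstrap and replay WAL from the shared repo
```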
- -## Standby Cluster Overview - -Standby PostgreSQL clusters are managed like any other PostgreSQL cluster that the PostgreSQL -Operator manages. For example, adding replicas to a standby cluster is identical to adding them to a -primary cluster. - -The main difference between a primary and standby cluster is that there is no primary instance on -the standby: one PostgreSQL instance is reading in the database changes from either the backup -repository or via streaming replication, while other instances are replicas of it. - -Any replicas created in the standby cluster are known as cascading replicas, i.e., replicas -replicating from a database server that itself is replicating from another database server. More -information about [cascading replication](https://www.postgresql.org/docs/current/warm-standby.html#CASCADING-REPLICATION) -can be found in the PostgreSQL documentation. - -Because standby clusters are effectively read-only, certain functionality -that involves making changes to a database, e.g., PostgreSQL user changes, is -blocked while a cluster is in standby mode. Additionally, backups and restores -are blocked as well. While [pgBackRest](https://pgbackrest.org/) supports -backups from standbys, this requires direct access to the primary database, -which cannot be done until the PostgreSQL Operator supports Kubernetes -federation. - -### Types of Standby Clusters -There are three ways to deploy a standby cluster with the Postgres Operator. - -#### Repo-based Standby - -A repo-based standby will connect to a pgBackRest repo stored in an external storage system -(S3, GCS, Azure Blob Storage, or any other Kubernetes storage system that can span multiple -clusters). The standby cluster will receive WAL files from the repo and will apply those to the -database. - -![PostgreSQL Operator Repo-based Standby](/images/repo-based-standby.png) - -#### Streaming Standby - -A streaming standby relies on an authenticated connection to the primary over the network. The -standby will receive WAL records directly from the primary as they are generated. - -![PostgreSQL Operator Streaming Standby](/images/streaming-standby.png) - -#### Streaming Standby with an External Repo - -You can also configure the operator to create a cluster that takes advantage of both methods. The -standby cluster will bootstrap from the pgBackRest repo and continue to receive WAL files as they -are pushed to the repo. The cluster will also directly connect to primary and receive WAL records -as they are generated. Using a repo while also streaming ensures that your cluster will still be up -to date with the pgBackRest repo if streaming falls behind. - -![PostgreSQL Operator Streaming Standby with External Repo](/images/streaming-standby-external-repo.png) - -For creating a standby Postgres cluster with PGO, please see the [disaster recovery tutorial]({{< relref "tutorial/disaster-recovery.md" >}}#standby-cluster) - -### Promoting a Standby Cluster - -There comes a time when a standby cluster needs to be promoted to an active cluster. Promoting a -standby cluster means that the standby leader PostgreSQL instance will become a primary and start -accepting both reads and writes. This has the net effect of pushing WAL (transaction archives) to -the pgBackRest repository. Before doing this, we need to ensure we don't accidentally create a split-brain -scenario. 
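For the streaming variants described above, the `standby` section can instead (or additionally) point directly at the active primary. A hedged fragment, assuming the active cluster's high-availability Service is reachable across clusters and that replication certificates and credentials are already shared:

```yaml
spec:
  standby:
    enabled: true
    host: hippo-primary-ha.postgres-operator.svc   # assumed route to the active primary
    port: 5432
    # repoName: repo1   # optionally also keep replaying WAL from the external repo
```

Flipping `enabled` to `false` (or removing the `standby` section) is what drives the promotion discussed next.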
- -If you are promoting the standby while the primary is still running, i.e., if this is not a disaster -scenario, you will want to [shutdown the active PostgreSQL cluster]({{< relref "tutorial/administrative-tasks.md" >}}#shutdown). - -The standby can be promoted once the primary is inactive, e.g., is either `shutdown` or failing. -This process essentially removes the standby configuration from the Kubernetes cluster’s DCS, which -triggers the promotion of the current standby leader to a primary PostgreSQL instance. You can view -this promotion in the PostgreSQL standby leader's (soon to be active leader's) logs. - -Once the former standby cluster has been successfully promoted to an active PostgreSQL cluster, -the original active PostgreSQL cluster can be safely [deleted]({{< relref "tutorial/delete-cluster.md" >}}) -and [recreated as a standby cluster]({{< relref "tutorial/disaster-recovery" >}}#standby-cluster). diff --git a/docs/content/architecture/high-availability.md b/docs/content/architecture/high-availability.md deleted file mode 100644 index f33f619525..0000000000 --- a/docs/content/architecture/high-availability.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: "High Availability" -date: -draft: false -weight: 110 ---- - -One of the great things about PostgreSQL is its reliability: it is very stable -and typically "just works." However, there are certain things that can happen in -the environment that PostgreSQL is deployed in that can affect its uptime, -including: - -- The database storage disk fails or some other hardware failure occurs -- The network on which the database resides becomes unreachable -- The host operating system becomes unstable and crashes -- A key database file becomes corrupted -- A data center is lost - -There may also be downtime events that are due to the normal case of operations, -such as performing a minor upgrade, security patching of operating system, -hardware upgrade, or other maintenance. - -Fortunately, PGO, the Postgres Operator from Crunchy Data, is prepared for this. - -![PostgreSQL Operator high availability Overview](/images/postgresql-ha-overview.png) - -The Crunchy PostgreSQL Operator supports a distributed-consensus based -high availability (HA) system that keeps its managed PostgreSQL clusters up and -running, even if the PostgreSQL Operator disappears. Additionally, it leverages -Kubernetes specific features such as -[Pod Anti-Affinity](#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity) -to limit the surface area that could lead to a PostgreSQL cluster becoming -unavailable. The PostgreSQL Operator also supports automatic healing of failed -primaries and leverages the efficient pgBackRest "delta restore" method, which -eliminates the need to fully reprovision a failed cluster! - -The Crunchy PostgreSQL Operator also maintains high availability during a -routine task such as a PostgreSQL minor version upgrade. - -For workloads that are sensitive to transaction loss, PGO supports PostgreSQL synchronous replication. - -The high availability backing for your PostgreSQL cluster is only as good as -your high availability backing for Kubernetes. To learn more about creating a -[high availability Kubernetes cluster](https://kubernetes.io/docs/tasks/administer-cluster/highly-available-master/), -please review the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/highly-available-master/) -or consult your systems administrator. 
- -## The Crunchy Postgres Operator High Availability Algorithm - -A critical aspect of any production-grade PostgreSQL deployment is a reliable -and effective high availability (HA) solution. Organizations want to know that -their PostgreSQL deployments can remain available despite various issues that -have the potential to disrupt operations, including hardware failures, network -outages, software errors, or even human mistakes. - -The key portion of high availability that the PostgreSQL Operator provides is -that it delegates the management of HA to the PostgreSQL clusters themselves. -This ensures that the PostgreSQL Operator is not a single-point of failure for -the availability of any of the PostgreSQL clusters that it manages, as the -PostgreSQL Operator is only maintaining the definitions of what should be in the -cluster (e.g. how many instances in the cluster, etc.). - -Each HA PostgreSQL cluster maintains its availability by using Patroni to manage -failover when the primary becomes compromised. Patroni stores the primary’s ID in -annotations on a Kubernetes `Endpoints` object which acts as a lease. The primary -must periodically renew the lease to signal that it’s healthy. If the primary -misses its deadline, replicas compare their WAL positions to see who has the most -up-to-date data. Instances with the latest data try to overwrite the ID on the lease. -The first to succeed becomes the new primary, and all others follow the new primary. - -## How The Crunchy PostgreSQL Operator Uses Pod Anti-Affinity - -Kubernetes has two types of Pod anti-affinity: - -- Preferred: With preferred (`preferredDuringSchedulingIgnoredDuringExecution`) Pod anti-affinity, Kubernetes will make a best effort to schedule Pods matching the anti-affinity rules to different Nodes. However, if it is not possible to do so, then Kubernetes may schedule one or more Pods to the same Node. -- Required: With required (`requiredDuringSchedulingIgnoredDuringExecution`) Pod anti-affinity, Kubernetes mandates that each Pod matching the anti-affinity rules **must** be scheduled to different Nodes. However, a Pod may not be scheduled if Kubernetes cannot find a Node that does not contain a Pod matching the rules. - -There is a tradeoff with these two types of pod anti-affinity: while "required" anti-affinity will ensure that all the matching Pods are scheduled on different Nodes, if Kubernetes cannot find an available Node, your Postgres instance may not be scheduled. Likewise, while "preferred" anti-affinity will make a best effort to scheduled your Pods on different Nodes, Kubernetes may compromise and schedule more than one Postgres instance of the same cluster on the same Node. - -By understanding these tradeoffs, the makeup of your Kubernetes cluster, and your requirements, you can choose the method that makes the most sense for your Postgres deployment. We'll show examples of both methods below! - -For an example for how pod anti-affinity works with PGO, please see the [high availability tutorial]({{< relref "tutorial/high-availability.md" >}}#pod-anti-affinity). - -## Synchronous Replication: Guarding Against Transactions Loss - -Clusters managed by the Crunchy PostgreSQL Operator can be deployed with -synchronous replication, which is useful for workloads that are sensitive to -losing transactions, as PostgreSQL will not consider a transaction to be -committed until it is committed to all synchronous replicas connected to a -primary. 
This provides a higher guarantee of data consistency and, when a -healthy synchronous replica is present, a guarantee of the most up-to-date data -during a failover event. - -This comes at a cost of performance: PostgreSQL has to wait for -a transaction to be committed on all synchronous replicas, and a connected client -will have to wait longer than if the transaction only had to be committed on the -primary (which is how asynchronous replication works). Additionally, there is a -potential impact to availability: if a synchronous replica crashes, any writes -to the primary will be blocked until a replica is promoted to become a new -synchronous replica of the primary. - -## Node Affinity - -Kubernetes [Node Affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity) -can be used to scheduled Pods to specific Nodes within a Kubernetes cluster. -This can be useful when you want your PostgreSQL instances to take advantage of -specific hardware (e.g. for geospatial applications) or if you want to have a -replica instance deployed to a specific region within your Kubernetes cluster -for high availability purposes. - -For an example for how node affinity works with PGO, please see the [high availability tutorial]({{< relref "tutorial/high-availability.md" >}}##node-affinity). - -## Tolerations - -Kubernetes [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) -can help with the scheduling of Pods to appropriate nodes. There are many -reasons that a Kubernetes administrator may want to use tolerations, such as -restricting the types of Pods that can be assigned to particular Nodes. -Reasoning and strategy for using taints and tolerations is outside the scope of -this documentation. - -You can configure the tolerations for your Postgres instances on the `postgresclusters` custom resource. - -## Pod Topology Spread Constraints - -Kubernetes [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) -can also help you efficiently schedule your workloads by ensuring your Pods are -not scheduled in only one portion of your Kubernetes cluster. By spreading your -Pods across your Kubernetes cluster among your various failure-domains, such as -regions, zones, nodes, and other user-defined topology domains, you can achieve -high availability as well as efficient resource utilization. - -For an example of how pod topology spread constraints work with PGO, please see -the [high availability tutorial]({{< relref "tutorial/high-availability.md" >}}#pod-topology-spread-constraints). - -## Rolling Updates - -During the lifecycle of a PostgreSQL cluster, there are certain events that may -require a planned restart, such as an update to a "restart required" PostgreSQL -configuration setting (e.g. [`shared_buffers`](https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS)) -or a change to a Kubernetes Pod template (e.g. [changing the memory request]({{< relref "tutorial/resize-cluster.md">}}#customize-cpu-memory)). -Restarts can be disruptive in a high availability deployment, which is -why many setups employ a ["rolling update" strategy](https://kubernetes.io/docs/tutorials/kubernetes-basics/update/update-intro/) -(aka a "rolling restart") to minimize or eliminate downtime during a planned -restart. 
- -Because PostgreSQL is a stateful application, a simple rolling restart strategy -will not work: PostgreSQL needs to ensure that there is a primary available that -can accept reads and writes. This requires following a method that will minimize -the amount of downtime when the primary is taken offline for a restart. - -The PostgreSQL Operator uses the following algorithm to perform the rolling restart to minimize any potential interruptions: - -1. Each replica is updated in sequential order. This follows the following -process: - - 1. The replica is explicitly shut down to ensure any outstanding changes are - flushed to disk. - - 2. If requested, the PostgreSQL Operator will apply any changes to the Pod. - - 3. The replica is brought back online. The PostgreSQL Operator waits for the - replica to become available before it proceeds to the next replica. - -2. The above steps are repeated until all of the replicas are restarted. - -3. A controlled switchover is performed. The PostgreSQL Operator determines -which replica is the best candidate to become the new primary. It then demotes -the primary to become a replica and promotes the best candidate to become the -new primary. - -4. The former primary follows a process similar to what is described in step 1. - -The downtime is thus constrained to the amount of time the switchover takes. - -PGO will automatically detect when to apply a rolling update. - -## Pod Disruption Budgets - -Pods in a Kubernetes cluster can experience [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) -as a result of actions initiated by the application owner or a Cluster Administrator. During these -voluntary disruptions Pod Disruption Budgets (PDBs) can be used to ensure that a minimum number of Pods -will be running. The operator allows you to define a minimum number of Pods that should be -available for instance sets and PgBouncer deployments in your postgrescluster. This minimum is -configured in the postgrescluster spec and will be used to create PDBs associated to a resource defined -in the spec. For example, the following spec will create two PDBs, one for `instance1` and one for -the PgBouncer deployment: - -``` -spec: - instances: - - name: instance1 - replicas: 3 - minAvailable: 1 - proxy: - pgBouncer: - replicas: 3 - minAvailable: 1 -``` - -{{% notice tip %}} -The `minAvailable` field accepts number (`3`) or string percentage (`50%`) values. For more -information see [Specifying a PodDisruptionBudget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget). -{{% /notice %}} - -If `minAvailable` is set to `0`, we will not reconcile a PDB for the resource and any existing PDBs -will be removed. This will effectively disable Pod Disruption Budgets for the resource. - -If `minAvailable` is not provided for an object, a default value will be defined based on the -number of replicas defined for that object. If there is one replica, a PDB will not be created. If -there is more than one replica defined, a minimum of one Pod will be used. 
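As a quick check that the operator reconciled budgets from a spec like the one above, you can list them. This is only a sketch: the namespace and the `hippo` label value are assumptions for your own cluster name.

```
kubectl -n postgres-operator get poddisruptionbudgets \
  --selector postgres-operator.crunchydata.com/cluster=hippo
```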
diff --git a/docs/content/architecture/monitoring.md b/docs/content/architecture/monitoring.md deleted file mode 100644 index 071ab876a3..0000000000 --- a/docs/content/architecture/monitoring.md +++ /dev/null @@ -1,307 +0,0 @@ ---- -title: "Monitoring" -date: -draft: false -weight: 130 ---- - -![PostgreSQL Operator Monitoring](/images/postgresql-monitoring.png) - -While having [high availability]({{< relref "architecture/high-availability.md" >}}), -[backups]({{< relref "architecture/backups.md" >}}), and disaster recovery systems in place helps in the event of something going wrong with your -PostgreSQL cluster, monitoring helps you anticipate problems before they happen. -Additionally, monitoring can help you diagnose and resolve additional issues -that may not result in downtime, but cause degraded performance. - -There are many different ways to monitor systems within Kubernetes, including -tools that come with Kubernetes itself. This is by no means to be a -comprehensive on how to monitor everything in Kubernetes, but rather what the -PostgreSQL Operator provides to give you an -[out-of-the-box monitoring solution]({{< relref "installation/monitoring/_index.md" >}}). - -## Getting Started - -If you want to install the metrics stack, please visit the [installation]({{< relref "installation/monitoring/_index.md" >}}) -instructions for the [PostgreSQL Operator Monitoring]({{< relref "installation/monitoring/_index.md" >}}) -stack. - -## Components - -The [PostgreSQL Operator Monitoring]({{< relref "installation/monitoring/_index.md" >}}) -stack is made up of several open source components: - -- [pgMonitor](https://github.com/CrunchyData/pgmonitor), which provides the core -of the monitoring infrastructure including the following components: - - [postgres_exporter](https://github.com/CrunchyData/pgmonitor/tree/main/postgres_exporter), - which provides queries used to collect metrics information about a PostgreSQL - instance. - - [Prometheus](https://github.com/prometheus/prometheus), a time-series - database that scrapes and stores the collected metrics so they can be consumed - by other services. - - [Grafana](https://github.com/grafana/grafana), a visualization tool that - provides charting and other capabilities for viewing the collected monitoring - data. - - [Alertmanager](https://github.com/prometheus/alertmanager), a tool that - can send alerts when metrics hit a certain threshold that require someone to - intervene. -- [pgnodemx](https://github.com/CrunchyData/pgnodemx), a PostgreSQL extension -that is able to pull container-specific metrics (e.g. CPU utilization, memory -consumption) from the container itself via SQL queries. - -## pgnodemx and the DownwardAPI - -pgnodemx is able to pull and format container-specific metrics by accessing several -Kubernetes fields that are mounted from the pod to the `database` container's filesystem. -By default, these fields include the pod's labels and annotations, as well as the -`database` pod's CPU and memory. These fields are mounted at the `/etc/database-containerinfo` -path. - -## Visualizations - -Below is a brief description of all the visualizations provided by the -[PostgreSQL Operator Monitoring]({{< relref "installation/monitoring/_index.md" >}}) -stack. Some of the descriptions may include some directional guidance on how to -interpret the charts, though this is only to provide a starting point: actual -causes and effects of issues can vary between systems. 
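Before any of the charts below populate, the cluster itself must expose metrics. A hedged fragment of a `postgrescluster` spec that enables the exporter sidecar; the image tag mirrors the one referenced in the manager manifest and may differ in your installation:

```yaml
spec:
  monitoring:
    pgmonitor:
      exporter:
        image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest
```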
- -Many of the visualizations can be broken down based on the following groupings: - -- Cluster: which PostgreSQL cluster should be viewed -- Pod: the specific Pod or PostgreSQL instance - -### Overview - -![PostgreSQL Operator Monitoring - Overview](/images/postgresql-monitoring-overview.png) - -The overview provides an overview of all of the PostgreSQL clusters that are -being monitoring by the PostgreSQL Operator Monitoring stack. This includes the -following information: - -- The name of the PostgreSQL cluster and the namespace that it is in -- The type of PostgreSQL cluster (HA [high availability] or standalone) -- The status of the cluster, as indicate by color. Green indicates the cluster -is available, red indicates that it is not. - -Each entry is clickable to provide additional cluster details. - -### PostgreSQL Details - -![PostgreSQL Operator Monitoring - Cluster Cluster Details](/images/postgresql-monitoring.png) - -The PostgreSQL Details view provides more information about a specific -PostgreSQL cluster that is being managed and monitored by the PostgreSQL -Operator. These include many key PostgreSQL-specific metrics that help make -decisions around managing a PostgreSQL cluster. These include: - -- Backup Status: The last time a backup was taken of the cluster. Green is good. -Orange means that a backup has not been taken in more than a day and may warrant -investigation. -- Active Connections: How many clients are connected to the database. Too many -clients connected could impact performance and, for values approaching 100%, can -lead to clients being unable to connect. -- Idle in Transaction: How many clients have a connection state of "idle in -transaction". Too many clients in this state can cause performance issues and, -in certain cases, maintenance issues. -- Idle: How many clients are connected but are in an "idle" state. -- TPS: The number of "transactions per second" that are occurring. Usually needs -to be combined with another metric to help with analysis. "Higher is better" -when performing benchmarking. -- Connections: An aggregated view of active, idle, and idle in transaction -connections. -- Database Size: How large databases are within a PostgreSQL cluster. Typically -combined with another metric for analysis. Helps keep track of overall disk -usage and if any triage steps need to occur around PVC size. -- WAL Size: How much space write-ahead logs (WAL) are taking up on disk. This -can contribute to extra space being used on your data disk, or can give you an -indication of how much space is being utilized on a separate WAL PVC. If you -are using replication slots, this can help indicate if a slot is not being -acknowledged if the numbers are much larger than the `max_wal_size` setting (the -PostgreSQL Operator does not use slots by default). -- Row Activity: The number of rows that are selected, inserted, updated, and -deleted. This can help you determine what percentage of your workload is read -vs. write, and help make database tuning decisions based on that, in conjunction -with other metrics. -- Replication Status: Provides guidance information on how much replication lag -there is between primary and replica PostgreSQL instances, both in bytes and -time. This can provide an indication of how much data could be lost in the event -of a failover. 
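The replication figures shown here ultimately come from Postgres itself, so they can be spot-checked from the primary. A hedged example; the pod name and namespace are hypothetical, while the `database` container name follows PGO's convention:

```
kubectl -n postgres-operator exec hippo-instance1-abcd-0 -c database -- \
  psql -c 'SELECT application_name, state, replay_lag FROM pg_stat_replication;'
```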
- -![PostgreSQL Operator Monitoring - Cluster Cluster Details 2](/images/postgresql-monitoring-cluster.png) - -- Conflicts / Deadlocks: These occur when PostgreSQL is unable to complete -operations, which can result in transaction loss. The goal is for these numbers -to be `0`. If these are occurring, check your data access and writing patterns. -- Cache Hit Ratio: A measure of how much of the "working data", e.g. data that -is being accessed and manipulated, resides in memory. This is used to understand -how much PostgreSQL is having to utilize the disk. The target number of this -should be as high as possible. How to achieve this is the subject of books, but -certain takes efforts on your applications use PostgreSQL. -- Buffers: The buffer usage of various parts of the PostgreSQL system. This can -be used to help understand the overall throughput between various parts of the -system. -- Commit & Rollback: How many transactions are committed and rolled back. -- Locks: The number of locks that are present on a given system. - -### Pod Details - -![PostgreSQL Operator Monitoring - Pod Details](/images/postgresql-monitoring-pod.png) - -Pod details provide information about a given Pod or Pods that are being used -by a PostgreSQL cluster. These are similar to "operating system" or "node" -metrics, with the differences that these are looking at resource utilization by -a container, not the entire node. - -It may be helpful to view these metrics on a "pod" basis, by using the Pod -filter at the top of the dashboard. - -- Disk Usage: How much space is being consumed by a volume. -- Disk Activity: How many reads and writes are occurring on a volume. -- Memory: Various information about memory utilization, including the request -and limit as well as actually utilization. -- CPU: The amount of CPU being utilized by a Pod -- Network Traffic: The amount of networking traffic passing through each network -device. -- Container Resources: The CPU and memory limits and requests. - -### Backups - -![PostgreSQL Operator - Monitoring - Backup Health](/images/postgresql-monitoring-backups.png) - -There are a variety of reasons why you need to monitoring your backups, starting -from answering the fundamental question of "do I have backups available?" -Backups can be used for a variety of situations, from cloning new clusters to -restoring clusters after a disaster. Additionally, Postgres can run into issues -if your backup repository is not healthy, e.g. if it cannot push WAL archives. -If your backups are set up properly and healthy, you will be set up to mitigate -the risk of data loss! - -The backup, or pgBackRest panel, will provide information about the overall -state of your backups. This includes: - -- Recovery Window: This is an indicator of how far back you are able to restore -your data from. This represents all of the backups and archives available in -your backup repository. Typically, your recovery window should be close to your -overall data retention specifications. -- Time Since Last Backup: this indicates how long it has been since your last -backup. This is broken down into pgBackRest backup type (full, incremental, -differential) as well as time since the last WAL archive was pushed. -- Backup Runtimes: How long the last backup of a given type (full, incremental -differential) took to execute. If your backups are slow, consider providing more -resources to the backup jobs and tweaking pgBackRest's performance tuning -settings. 
-- Backup Size: How large the backups of a given type (full, incremental, -differential). -- WAL Stats: Shows the metrics around WAL archive pushes. If you have failing -pushes, you should to see if there is a transient or permanent error that is -preventing WAL archives from being pushed. If left untreated, this could end up -causing issues for your Postgres cluster. - -### PostgreSQL Service Health Overview - -![PostgreSQL Operator Monitoring - Service Health Overview](/images/postgresql-monitoring-service.png) - -The Service Health Overview provides information about the Kubernetes Services -that sit in front of the PostgreSQL Pods. This provides information about the -status of the network. - -- Saturation: How much of the available network to the Service is being -consumed. High saturation may cause degraded performance to clients or create -an inability to connect to the PostgreSQL cluster. -- Traffic: Displays the number of transactions per minute that the Service is -handling. -- Errors: Displays the total number of errors occurring at a particular Service. -- Latency: What the overall network latency is when interfacing with the -Service. - -### Query Runtime - -![PostgreSQL Operator Monitoring - Query Performance](/images/postgresql-monitoring-query-total.png) - -Looking at the overall performance of queries can help optimize a Postgres -deployment, both from [providing resources]({{< relref "tutorial/customize-cluster.md" >}}) to query tuning in the application -itself. - -You can get a sense of the overall activity of a PostgreSQL cluster from the -chart that is visualized above: - -- Queries Executed: The total number of queries executed on a system during the -period. -- Query runtime: The aggregate runtime of all the queries combined across the -system that were executed in the period. -- Query mean runtime: The average query time across all queries executed on the -system in the given period. -- Rows retrieved or affected: The total number of rows in a database that were -either retrieved or had modifications made to them. - -PostgreSQL Operator Monitoring also further breaks down the queries so you can -identify queries that are being executed too frequently or are taking up too -much time. - -![PostgreSQL Operator Monitoring - Query Analysis](/images/postgresql-monitoring-query-topn.png) - -- Query Mean Runtime (Top N): This highlights the N number of slowest queries by -average runtime on the system. This might indicate you are missing an index -somewhere, or perhaps the query could be rewritten to be more efficient. -- Query Max Runtime (Top N): This highlights the N number of slowest queries by -absolute runtime. This could indicate that a specific query or the system as a -whole may need more resources. -- Query Total Runtime (Top N): This highlights the N of slowest queries by -aggregate runtime. This could indicate that a ORM is looping over a single query -and executing it many times that could possibly be rewritten as a single, faster -query. - -### Alerts - -![PostgreSQL Operator Monitoring - Alerts](/images/postgresql-monitoring-alerts.png) - -Alerting lets one view and receive alerts about actions that require -intervention, for example, a HA cluster that cannot self-heal. The alerting -system is powered by [Alertmanager](https://github.com/prometheus/alertmanager). - -The alerts that come installed by default include: - -- `PGExporterScrapeError`: The Crunchy PostgreSQL Exporter is having issues -scraping statistics used as part of the monitoring stack. 
-- `PGIsUp`: A PostgreSQL instance is down. -- `PGIdleTxn`: There are too many connections that are in the -"idle in transaction" state. -- `PGQueryTime`: A single PostgreSQL query is taking too long to run. Issues a -warning at 12 hours and goes critical after 24. -- `PGConnPerc`: Indicates that there are too many connection slots being used. -Issues a warning at 75% and goes critical above 90%. -- `PGDiskSize`: Indicates that a PostgreSQL database is too large and could be in -danger of running out of disk space. Issues a warning at 75% and goes critical -at 90%. -- `PGReplicationByteLag`: Indicates that a replica is too far behind a primary -instance, which could risk data loss in a failover scenario. Issues a warning at -50MB an goes critical at 100MB. -- `PGReplicationSlotsInactive`: Indicates that a replication slot is inactive. -Not attending to this can lead to out-of-disk errors. -- `PGXIDWraparound`: Indicates that a PostgreSQL instance is nearing transaction -ID wraparound. Issues a warning at 50% and goes critical at 75%. It's important -that you [vacuum your database](https://info.crunchydata.com/blog/managing-transaction-id-wraparound-in-postgresql) -to prevent this. -- `PGEmergencyVacuum`: Indicates that autovacuum is not running or cannot keep -up with ongoing changes, i.e. it's past its "freeze" age. Issues a warning at -110% and goes critical at 125%. -- `PGArchiveCommandStatus`: Indicates that the archive command, which is used -to ship WAL archives to pgBackRest, is failing. -- `PGSequenceExhaustion`: Indicates that a sequence is over 75% used. -- `PGSettingsPendingRestart`: Indicates that there are settings changed on a -PostgreSQL instance that requires a restart. - -Optional alerts that can be enabled: - -- `PGMinimumVersion`: Indicates if PostgreSQL is below a desired version. -- `PGRecoveryStatusSwitch_Replica`: Indicates that a replica has been promoted -to a primary. -- `PGConnectionAbsent_Prod`: Indicates that metrics collection is absent from a -PostgresQL instance. -- `PGSettingsChecksum`: Indicates that PostgreSQL settings have changed from a -previous state. -- `PGDataChecksum`: Indicates that there are data checksum failures on a -PostgreSQL instance. This could be a sign of data corruption. - -You can modify these alerts as you see fit, and add your own alerts as well! -Please see the [installation instructions]({{< relref "installation/monitoring/_index.md" >}}) -for general setup of the PostgreSQL Operator Monitoring stack. diff --git a/docs/content/architecture/overview.md b/docs/content/architecture/overview.md deleted file mode 100644 index 3fc5dc8c8a..0000000000 --- a/docs/content/architecture/overview.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: "Overview" -date: -draft: false -weight: 100 ---- - -The goal of PGO, the Postgres Operator from Crunchy Data is to provide a means to quickly get -your applications up and running on Postgres for both development and -production environments. To understand how PGO does this, we -want to give you a tour of its architecture, with explains both the architecture -of the PostgreSQL Operator itself as well as recommended deployment models for -PostgreSQL in production! - -# PGO Architecture - -The Crunchy PostgreSQL Operator extends Kubernetes to provide a higher-level -abstraction for rapid creation and management of PostgreSQL clusters. 
-The Crunchy PostgreSQL Operator leverages a Kubernetes concept referred to as "[Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)" to create several [custom resource definitions (CRDs)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions) that allow for the management of PostgreSQL clusters.
-
-The main custom resource definition is [`postgresclusters.postgres-operator.crunchydata.com`]({{< relref "references/crd.md" >}}). This allows you to control all the information about a Postgres cluster, including:
-
-- General information
-- Resource allocation
-- High availability
-- Backup management
-- Where and how it is deployed (affinity, tolerations, topology spread constraints)
-- Disaster Recovery / standby clusters
-- Monitoring
-
-and more.
-
-PGO itself runs as a Deployment and is composed of a single container.
-
-- `operator` (image: postgres-operator) - This is the heart of the PostgreSQL Operator. It contains a series of Kubernetes [controllers](https://kubernetes.io/docs/concepts/architecture/controller/) that watch for events on a series of native Kubernetes resources (Jobs, Pods) as well as the Custom Resources that come with the PostgreSQL Operator (Pgcluster, Pgtask).
-
-The main purpose of PGO is to create and update information around the structure of a Postgres Cluster, and to relay information about the overall status and health of a PostgreSQL cluster. The goal is to also simplify this process as much as possible for users. For example, let's say we want to create a high-availability PostgreSQL cluster that has multiple replicas, supports having backups in both a local storage area and Amazon S3, and has built-in metrics and connection pooling, similar to:
-
-![PostgreSQL Cluster Architecture](/images/postgresql-cluster-architecture.png)
-
-This can be accomplished with a relatively simple manifest. Please refer to the [tutorial]({{< relref "tutorial/_index.md" >}}) for how to accomplish this, or see the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo.
-
-The Postgres Operator handles setting up all of the various StatefulSets, Deployments, Services, and other Kubernetes objects.
-
-You will also notice that **high-availability is enabled by default** if you deploy at least one Postgres replica. The Crunchy PostgreSQL Operator uses a distributed-consensus method for PostgreSQL cluster high-availability, and as such delegates the management of each cluster's availability to the clusters themselves. This removes the PostgreSQL Operator as a single point of failure, and has benefits such as faster recovery times for each PostgreSQL cluster. For a detailed discussion on high-availability, please see the [High-Availability]({{< relref "architecture/high-availability.md" >}}) section.
-
-## Kubernetes StatefulSets: The PGO Deployment Model
-
-PGO, the Postgres Operator from Crunchy Data, uses [Kubernetes StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) for running Postgres instances, and will use [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) for more ephemeral services.
-
-PGO deploys Kubernetes StatefulSets in a way that allows you to create different Postgres instance groups and supports advanced operations such as rolling updates that minimize or eliminate Postgres downtime.
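-
-As a minimal sketch (the instance set names and replica counts here are illustrative assumptions, not taken from this document), multiple instance groups can be declared in the `spec.instances` list, with PGO managing a StatefulSet for each:
-
-```yaml
-  instances:
-    - name: instance1
-      replicas: 2
-    - name: instance2
-      replicas: 1
-```
-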
-Additional components in our PostgreSQL cluster, such as the pgBackRest repository or an optional PgBouncer, are deployed with Kubernetes Deployments.
-
-With the PGO architecture, we can also leverage StatefulSets to apply affinity and toleration rules across every Postgres instance or individual ones. For instance, we may want to force one or more of our PostgreSQL replicas to run on Nodes in a different region than our primary PostgreSQL instances.
-
-What's great about this is that PGO manages this for you, so you don't have to worry! Being aware of this model can help you understand how the Postgres Operator gives you maximum flexibility for your PostgreSQL clusters while giving you the tools to troubleshoot issues in production.
-
-The last piece of this model is the use of [Kubernetes Services](https://kubernetes.io/docs/concepts/services-networking/service/) for accessing your PostgreSQL clusters and their various components. The PostgreSQL Operator puts services in front of each Deployment to ensure you have a known, consistent means of accessing your PostgreSQL components.
-
-Note that in some production environments, there can be delays in accessing Services during transition events. The PostgreSQL Operator attempts to mitigate delays during critical operations (e.g. failover, restore, etc.) by directly accessing the Kubernetes Pods to perform given actions.
-
-# Additional Architecture Information
-
-There is certainly a lot to unpack in the overall architecture of PGO. Understanding the architecture will help you plan the deployment model that is best for your environment. For more information on the architectures of various components of the PostgreSQL Operator, please read onward!
diff --git a/docs/content/architecture/pgadmin4.md b/docs/content/architecture/pgadmin4.md
deleted file mode 100644
index 047db37397..0000000000
--- a/docs/content/architecture/pgadmin4.md
+++ /dev/null
@@ -1,166 +0,0 @@
----
-title: "pgAdmin 4"
-date:
-draft: false
-weight: 900
----
-
-![pgAdmin 4 Query](/images/pgadmin4-query.png)
-
-[pgAdmin 4](https://www.pgadmin.org/) is a popular graphical user interface that makes it easy to work with PostgreSQL databases from a web-based client. With its ability to manage and orchestrate changes for PostgreSQL users, the PostgreSQL Operator is a natural partner to keep a pgAdmin 4 environment synchronized with a PostgreSQL environment.
-
-The PostgreSQL Operator lets you deploy a pgAdmin 4 environment alongside a PostgreSQL cluster and keeps users' database credentials synchronized. You can simply log into pgAdmin 4 with your PostgreSQL username and password and immediately have access to your databases.
-
-## Deploying pgAdmin 4
-
-{{% notice warning %}}
-Unfortunately, pgAdmin 4 is not currently compatible with PostgreSQL 15.
-{{% /notice %}}
-
-If you've done the [quickstart]({{< relref "quickstart/_index.md" >}}), add the following fields to the spec and reapply; if you don't have any Postgres clusters running, add the fields to a spec, and apply.
-
-```yaml
-  userInterface:
-    pgAdmin:
-      image: {{< param imageCrunchyPGAdmin >}}
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-```
-
-This creates a pgAdmin 4 deployment unique to this PostgreSQL cluster and synchronizes the PostgreSQL user information.
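-
-Before connecting, you can confirm that the pgAdmin 4 Service was created (a quick sketch; it assumes the cluster is named `hippo` and runs in the `postgres-operator` namespace, matching the examples below):
-
-```
-kubectl -n postgres-operator get svc hippo-pgadmin
-```
-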
-To access pgAdmin 4, you can set up a port-forward to the Service, which follows the pattern `<clusterName>-pgadmin`, to port `5050`:
-
-```
-kubectl port-forward svc/hippo-pgadmin 5050:5050
-```
-
-Point your browser at `http://localhost:5050` and you will be prompted to log in. Use your database username with `@pgo` appended and your database password. In our case, the pgAdmin username is `hippo@pgo` and the password is found in the user secret, `hippo-pguser-hippo`:
-
-```
-PG_CLUSTER_USER_SECRET_NAME=hippo-pguser-hippo
-
-PGPASSWORD=$(kubectl get secrets -n postgres-operator "${PG_CLUSTER_USER_SECRET_NAME}" -o go-template='{{.data.password | base64decode}}')
-PGUSER=$(kubectl get secrets -n postgres-operator "${PG_CLUSTER_USER_SECRET_NAME}" -o go-template='{{.data.user | base64decode}}')
-```
-
-![pgAdmin 4 Login Page](/images/pgadmin4-login.png)
-
-{{% notice tip %}}
-If your password does not appear to work, you can retry setting up the user by rotating the user password. Do this by deleting the `password` data field from the user secret (e.g. `hippo-pguser-hippo`).
-
-Optionally, you can also set a [custom password]({{< relref "architecture/user-management.md" >}}).
-{{% /notice %}}
-
-## User Synchronization
-
-The operator will synchronize users [defined in the spec]({{< relref "tutorial/user-management.md" >}}) (e.g., in [`spec.users`]({{< relref "/references/crd#postgresclusterspecusersindex" >}})) with the pgAdmin 4 deployment. Any user created in the database without being defined in the spec will not be synchronized.
-
-## Custom Configuration
-
-You can adjust some pgAdmin settings through the [`userInterface.pgAdmin.config`]({{< relref "/references/crd#postgresclusterspecuserinterfacepgadminconfig" >}}) field. For example, set `SHOW_GRAVATAR_IMAGE` to `False` to disable automatic profile pictures:
-
-```yaml
-  userInterface:
-    pgAdmin:
-      config:
-        settings:
-          SHOW_GRAVATAR_IMAGE: False
-```
-
-You can also mount files to `/etc/pgadmin/conf.d` inside the pgAdmin container using [projected volumes](https://kubernetes.io/docs/concepts/storage/projected-volumes/). The following mounts `useful.txt` of Secret `mysecret` to `/etc/pgadmin/conf.d/useful.txt`:
-
-```yaml
-  userInterface:
-    pgAdmin:
-      config:
-        files:
-        - secret:
-            name: mysecret
-            items:
-            - key: useful.txt
-        - configMap:
-            name: myconfigmap
-            optional: false
-```
-
-### Kerberos Configuration
-
-You can configure pgAdmin to [authenticate its users using Kerberos](https://www.pgadmin.org/docs/pgadmin4/latest/kerberos.html) SPNEGO. In addition to setting `AUTHENTICATION_SOURCES` and `KRB_APP_HOST_NAME`, you need to enable `KERBEROS_AUTO_CREATE_USER` and mount a `krb5.conf` and a keytab file:
-
-```yaml
-  userInterface:
-    pgAdmin:
-      config:
-        settings:
-          AUTHENTICATION_SOURCES: ['kerberos']
-          KERBEROS_AUTO_CREATE_USER: True
-          KRB_APP_HOST_NAME: my.service.principal.name.local # without HTTP class
-          KRB_KTNAME: /etc/pgadmin/conf.d/krb5.keytab
-        files:
-        - secret:
-            name: mysecret
-            items:
-            - key: krb5.conf
-            - key: krb5.keytab
-```
-
-### LDAP Configuration
-
-You can configure pgAdmin to [authenticate its users using LDAP](https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html) passwords.
-In addition to setting `AUTHENTICATION_SOURCES` and `LDAP_SERVER_URI`, you need to enable `LDAP_AUTO_CREATE_USER`:
-
-```yaml
-  userInterface:
-    pgAdmin:
-      config:
-        settings:
-          AUTHENTICATION_SOURCES: ['ldap']
-          LDAP_AUTO_CREATE_USER: True
-          LDAP_SERVER_URI: ldaps://my.ds.example.com
-```
-
-When using a dedicated user to bind, you can store the `LDAP_BIND_PASSWORD` setting in a Secret and reference it through the [`ldapBindPassword`]({{< relref "/references/crd#postgresclusterspecuserinterfacepgadminconfigldapbindpassword" >}}) field:
-
-```yaml
-  userInterface:
-    pgAdmin:
-      config:
-        ldapBindPassword:
-          name: ldappass
-          key: mypw
-```
-
-## Deleting pgAdmin 4
-
-You can remove the pgAdmin 4 deployment by removing the `userInterface` field from the spec.
diff --git a/docs/content/architecture/scheduling.md b/docs/content/architecture/scheduling.md
deleted file mode 100644
index de9e248d2f..0000000000
--- a/docs/content/architecture/scheduling.md
+++ /dev/null
@@ -1,107 +0,0 @@
----
-title: "Scheduling"
-date:
-draft: false
-weight: 120
----
-
-Deploying to your Kubernetes cluster may allow for greater reliability than other environments, but that's only the case when it's configured correctly. Fortunately, PGO, the Postgres Operator from Crunchy Data, is ready to help with helpful default settings to ensure you make the most out of your Kubernetes environment!
-
-## High Availability By Default
-
-As shown in the [high availability tutorial]({{< relref "tutorial/high-availability.md" >}}#pod-topology-spread-constraints), PGO supports the use of [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) to customize your Pod deployment strategy, but useful defaults are already in place for you without any additional configuration required!
-
-PGO's default scheduling constraints for HA are implemented for the various Pods comprising a PostgreSQL cluster, specifically to ensure the Operator always deploys a High-Availability cluster architecture by default.
-
-Using Pod Topology Spread Constraints, the general scheduling guidelines are as follows:
-
-- Pods are only considered from the same cluster.
-- PgBouncer pods are only considered amongst other PgBouncer pods.
-- Postgres pods are considered amongst all Postgres pods and pgBackRest repo host Pods.
-- pgBackRest repo host Pods are considered amongst all Postgres pods and pgBackRest repo host Pods.
-- Pods are scheduled across the different `kubernetes.io/hostname` and `topology.kubernetes.io/zone` failure domains.
-- Pods are still scheduled when there are fewer nodes than Pods, e.g. on a single node.
-
-With the above configuration, your data is distributed as widely as possible throughout your Kubernetes cluster to maximize safety.
-
-## Customization
-
-While the default scheduling settings are designed to meet the widest variety of environments, they can be customized or removed as needed.
-Assuming a PostgresCluster named 'hippo', the default Pod Topology Spread Constraints applied on Postgres Instance and pgBackRest Repo Host Pods are as follows:
-
-```
-topologySpreadConstraints:
-  - maxSkew: 1
-    topologyKey: kubernetes.io/hostname
-    whenUnsatisfiable: ScheduleAnyway
-    labelSelector:
-      matchLabels:
-        postgres-operator.crunchydata.com/cluster: hippo
-      matchExpressions:
-      - key: postgres-operator.crunchydata.com/data
-        operator: In
-        values:
-        - postgres
-        - pgbackrest
-  - maxSkew: 1
-    topologyKey: topology.kubernetes.io/zone
-    whenUnsatisfiable: ScheduleAnyway
-    labelSelector:
-      matchLabels:
-        postgres-operator.crunchydata.com/cluster: hippo
-      matchExpressions:
-      - key: postgres-operator.crunchydata.com/data
-        operator: In
-        values:
-        - postgres
-        - pgbackrest
-```
-
-Similarly, for PgBouncer Pods they will be:
-
-```
-topologySpreadConstraints:
-  - maxSkew: 1
-    topologyKey: kubernetes.io/hostname
-    whenUnsatisfiable: ScheduleAnyway
-    labelSelector:
-      matchLabels:
-        postgres-operator.crunchydata.com/cluster: hippo
-        postgres-operator.crunchydata.com/role: pgbouncer
-  - maxSkew: 1
-    topologyKey: topology.kubernetes.io/zone
-    whenUnsatisfiable: ScheduleAnyway
-    labelSelector:
-      matchLabels:
-        postgres-operator.crunchydata.com/cluster: hippo
-        postgres-operator.crunchydata.com/role: pgbouncer
-```
-
-As described in the [API documentation](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods), this means that there should be a maximum of one Pod difference within the `kubernetes.io/hostname` and `topology.kubernetes.io/zone` failure domains when considering either `data` Pods, i.e. Postgres Instance or pgBackRest repo host Pods from a single PostgresCluster, or when considering PgBouncer Pods from a single PostgresCluster.
-
-Any other scheduling configuration settings, such as [Affinity, Anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity), [Taints, Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/), or other [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/), will be added in addition to these defaults. Care should be taken to ensure the combined effect of these settings is appropriate for your Kubernetes cluster.
-
-In cases where these defaults are not desired, PGO provides a method to disable the default Pod scheduling by setting `spec.disableDefaultPodScheduling` to `true`.
diff --git a/docs/content/architecture/user-management.md b/docs/content/architecture/user-management.md
deleted file mode 100644
index ed8c75bb2b..0000000000
--- a/docs/content/architecture/user-management.md
+++ /dev/null
@@ -1,111 +0,0 @@
----
-title: "User Management"
-date:
-draft: false
-weight: 125
----
-
-PGO manages PostgreSQL users that you define in [`PostgresCluster.spec.users`]({{< relref "/references/crd#postgresclusterspecusersindex" >}}). There, you can list their [role attributes](https://www.postgresql.org/docs/current/role-attributes.html) and which databases they can access.
-
-Below is some information on how the user and database management systems work. To try out some examples, please see the [user and database management]({{< relref "tutorial/user-management.md" >}}) section of the [tutorial]({{< relref "tutorial/_index.md" >}}).
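-
-As a minimal sketch of what such a definition looks like (the user name `rhino`, database `zoo`, and role attribute here are illustrative, echoing the logical replication guide elsewhere in these docs):
-
-```yaml
-  users:
-    - name: rhino
-      databases:
-        - zoo
-      options: "CREATEDB"
-```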
-
-## Understanding Default User Management
-
-When you create a Postgres cluster with PGO and do not specify any additional users or databases, PGO will do the following:
-
-- Create a database that matches the name of the Postgres cluster.
-- Create an unprivileged Postgres user with the name of the cluster. This user has access to the database created in the previous step.
-- Create a Secret with the login credentials and connection details for the Postgres user in relation to the database. This is stored in a Secret named `<clusterName>-pguser-<userName>`. These credentials include:
-  - `user`: The name of the user account.
-  - `password`: The password for the user account.
-  - `dbname`: The name of the database that the user has access to by default.
-  - `host`: The name of the host of the database. This references the [Service](https://kubernetes.io/docs/concepts/services-networking/service/) of the primary Postgres instance.
-  - `port`: The port that the database is listening on.
-  - `uri`: A [PostgreSQL connection URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) that provides all the information for logging into the Postgres database.
-  - `jdbc-uri`: A [PostgreSQL JDBC connection URI](https://jdbc.postgresql.org/documentation/use/) that provides all the information for logging into the Postgres database via the JDBC driver.
-
-You can see this default behavior in the [connect to a cluster]({{< relref "tutorial/connect-cluster.md" >}}) portion of the tutorial.
-
-As an example, using our `hippo` Postgres cluster, we would see the following created:
-
-- A database named `hippo`.
-- A Postgres user named `hippo`.
-- A Secret named `hippo-pguser-hippo` that contains the user credentials and connection information.
-
-While the above defaults may work for your application, there are certain cases where you may need to customize your users and databases:
-
-- You may require access to the `postgres` superuser.
-- You may need to define privileges for your users.
-- You may need multiple databases in your cluster, e.g. in a multi-tenant application.
-- Certain users may only be able to access certain databases.
-
-## Custom Users and Databases
-
-Users and databases can be customized in the [`spec.users`]({{< relref "/references/crd#postgresclusterspecusersindex" >}}) section of the custom resource. These can be added during cluster creation and adjusted over time, but it's important to note the following:
-
-- If `spec.users` is set during cluster creation, PGO will **not** create any default users or databases except for `postgres`. If you want additional databases, you will need to specify them.
-- For any users added in `spec.users`, PGO will create a Secret of the format `<clusterName>-pguser-<userName>`. This will contain the user credentials.
-  - If no databases are specified, `dbname` and `uri` will not be present in the Secret.
-  - If at least one `spec.users.databases` is specified, the first database in the list will be populated into the connection credentials.
-- To prevent accidental data loss, PGO does not automatically drop users. We will see how to drop a user below.
-- Similarly, to prevent accidental data loss, PGO does not automatically drop databases. We will see how to drop a database below.
-- Role attributes are not automatically dropped if you remove them. You will have to set the inverse attribute to drop them (e.g. `NOSUPERUSER`).
-- The special `postgres` user can be added as one of the custom users; however, the privileges of the users cannot be adjusted.
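-
-For instance, as an illustrative sketch, enabling access to the `postgres` superuser alongside a regular user might look like this (per the note above, the privileges of `postgres` itself cannot be adjusted):
-
-```yaml
-  users:
-    - name: postgres
-    - name: hippo
-      databases:
-        - hippo
-```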
-
-For specific examples for how to manage users, please see the [user and database management]({{< relref "tutorial/user-management.md" >}}) section of the [tutorial]({{< relref "tutorial/_index.md" >}}).
-
-## Generated Passwords
-
-PGO generates a random password for each Postgres user it creates. Postgres allows almost any character in its passwords, but your application may have stricter requirements. To have PGO generate a password without special characters, set the `spec.users.password.type` field for that user to `AlphaNumeric`. For complete control over a user's password, see the [custom passwords](#custom-passwords) section.
-
-To have PGO generate a new password, remove the existing `password` field from the user _Secret_. For example, on a Postgres cluster named `hippo` in the `postgres-operator` namespace with a Postgres user named `hippo`, use the following `kubectl patch` command:
-
-```shell
-kubectl patch secret -n postgres-operator hippo-pguser-hippo -p '{"data":{"password":""}}'
-```
-
-## Custom Passwords {#custom-passwords}
-
-There are cases where you may want to explicitly provide your own password for a Postgres user. PGO determines the password from an attribute in the user Secret called `verifier`. This contains a hashed copy of your password. When `verifier` changes, PGO will load the contents of the verifier into your Postgres cluster. This method allows for the secure transmission of the password into the Postgres database.
-
-Postgres provides two methods for hashing passwords: SCRAM-SHA-256 and MD5. PGO uses the preferred (and, as of PostgreSQL 14, default) method, SCRAM-SHA-256.
-
-There are two ways you can set a custom password for a user. You can provide a plaintext password in the `password` field and remove the `verifier`. When PGO detects a password without a verifier, it will generate the SCRAM `verifier` for you. Alternatively, you can generate your own password and verifier. When both values are found in the user Secret, PGO will not generate anything. Once the password and verifier are found, PGO will ensure the provided credential is properly set in Postgres.
-
-### Example
-
-For example, let's say we have a Postgres cluster named `hippo` and a Postgres user named `hippo`. The Secret then would be called `hippo-pguser-hippo`. We want to set the password for `hippo` to be `datalake`, and we can achieve this with a simple `kubectl patch` command. The below assumes that the Secret is stored in the `postgres-operator` namespace:
-
-```shell
-kubectl patch secret -n postgres-operator hippo-pguser-hippo -p \
-  '{"stringData":{"password":"datalake","verifier":""}}'
-```
-
-{{% notice tip %}}
-We can take advantage of the [Kubernetes Secret](https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1/#Secret) `stringData` field to specify non-binary secret data in string form.
-{{% /notice %}}
-
-PGO generates the SCRAM verifier and applies the updated password to Postgres, and you will be able to log in with the password `datalake`.
diff --git a/docs/content/faq/_index.md b/docs/content/faq/_index.md
deleted file mode 100644
index 6f59c11a01..0000000000
--- a/docs/content/faq/_index.md
+++ /dev/null
@@ -1,74 +0,0 @@
----
-title: "FAQ"
-date:
-draft: false
-weight: 105
-
-aliases:
-  - /contributing
----
-
-## Project FAQ
-
-### What is The PGO Project?
-
-The PGO Project is the open source project associated with the development of [PGO](https://github.com/CrunchyData/postgres-operator), the [Postgres Operator](https://github.com/CrunchyData/postgres-operator) for Kubernetes from [Crunchy Data](https://www.crunchydata.com).
-
-PGO is a [Kubernetes Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/), providing a declarative solution for managing your PostgreSQL clusters. Within a few moments, you can have a Postgres cluster complete with high availability, disaster recovery, and monitoring, all over secure TLS communications.
-
-PGO is the upstream project from which [Crunchy PostgreSQL for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) is derived. You can find more information on Crunchy PostgreSQL for Kubernetes [here](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/).
-
-### What’s the difference between PGO and Crunchy PostgreSQL for Kubernetes?
-
-PGO is the Postgres Operator from Crunchy Data. It is developed pursuant to the PGO Project and is designed to be a frequently released, fast-moving project where all new development happens.
-
-[Crunchy PostgreSQL for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) is produced by taking selected releases of PGO, combining them with Crunchy Certified PostgreSQL and PostgreSQL containers certified by Crunchy Data, maintained for commercial support, and made available to customers as the Crunchy PostgreSQL for Kubernetes offering.
-
-### Where can I find support for PGO?
-
-The community can help answer questions about PGO via the [PGO mailing list](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join).
-
-Information regarding support for PGO is available in the [Support]({{< relref "support/_index.md" >}}) section of the PGO documentation, which you can find [here]({{< relref "support/_index.md" >}}).
-
-For additional information regarding commercial support and Crunchy PostgreSQL for Kubernetes, you can [contact Crunchy Data](https://www.crunchydata.com/contact/).
-
-### Under which open source license is PGO source code available?
-
-The PGO source code is available under the [Apache License 2.0](https://github.com/CrunchyData/postgres-operator/blob/master/LICENSE.md).
-
-### Where are the release tags for PGO v5?
-
-With PGO v5, we've made some changes to our overall process. Instead of providing quarterly release tags as we did with PGO v4, we're focused on ongoing active development in the v5 primary development branch (`master`, which will become `main`). Consistent with our practices in v4, previews of stable releases with the release tags are made available in the [Crunchy Data Developer Portal](https://www.crunchydata.com/developers).
-
-These changes allow for more rapid feature development and releases in the upstream PGO project, while providing [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) users with stable releases for production use.
-
-To the extent you have constraints specific to your use, please feel free to reach out to [info@crunchydata.com](mailto:info@crunchydata.com) to discuss how we can address those specifically.
-
-### How can I get involved with the PGO Project?
-
-PGO is developed by the PGO Project. The PGO Project welcomes community engagement and contribution.
-
-The PGO source code and community issue trackers are hosted at [GitHub](https://github.com/CrunchyData/postgres-operator).
-
-For community questions and support, please sign up for the [PGO mailing list](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join).
-
-For information regarding contribution, please review the contributor guide [here](https://github.com/CrunchyData/postgres-operator/blob/master/CONTRIBUTING.md).
-
-Please register for the [Crunchy Data Developer Portal mailing list](https://www.crunchydata.com/developers/newsletter) to receive updates regarding Crunchy PostgreSQL for Kubernetes releases and the [Crunchy Data newsletter](https://www.crunchydata.com/newsletter/) for general updates from Crunchy Data.
-
-### Where do I report a PGO bug?
-
-The PGO Project uses GitHub for its [issue tracking](https://github.com/CrunchyData/postgres-operator/issues/new/choose). You can file your issue [here](https://github.com/CrunchyData/postgres-operator/issues/new/choose).
-
-### How often is PGO released?
-
-The PGO team currently plans to release new builds approximately every few weeks. The PGO team will flag certain builds as “stable” at their discretion. Note that the term “stable” does not imply fitness for production usage or any kind of warranty whatsoever.
diff --git a/docs/content/guides/_index.md b/docs/content/guides/_index.md
deleted file mode 100644
index ec165cc4ff..0000000000
--- a/docs/content/guides/_index.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: "Guides"
-date:
-draft: false
-weight: 35
----
-
-This section contains guides on handling various scenarios when managing Postgres clusters using PGO, the Postgres Operator. These include step-by-step instructions for situations such as migrating data to a PGO managed Postgres cluster or upgrading from an older version of PGO.
-
-These guides are in no particular order: choose the guide that is most applicable to your situation.
-
-If you are looking for how to manage most day-to-day Postgres scenarios, we recommend first going through the [Tutorial]({{< relref "tutorial/_index.md" >}}).
diff --git a/docs/content/guides/data-migration.md b/docs/content/guides/data-migration.md
deleted file mode 100644
index 8752cb111c..0000000000
--- a/docs/content/guides/data-migration.md
+++ /dev/null
@@ -1,132 +0,0 @@
----
-title: "Migrate Data Volumes to New Clusters"
-date:
-draft: false
-weight: 105
----
-
-There are certain cases where you may want to migrate existing volumes to a new cluster. If so, read on for an in-depth look at the steps required.
-
-## Configure your PostgresCluster CRD
-
-In order to use existing pgData, pg_wal, or pgBackRest repo volumes in a new PostgresCluster, you will need to configure the `spec.dataSource.volumes` section of your PostgresCluster CRD. As shown below, there are three possible volumes you may configure: `pgDataVolume`, `pgWALVolume`, and `pgBackRestVolume`. Under each, you must define the PVC name to use in the new cluster. A directory may also be defined, as needed, for cases where the existing directory name does not match the v5 directory.
-
-To help explain how these fields are used, we will consider a `pgcluster` from PGO v4, `oldhippo`. We will assume that the `pgcluster` has been deleted and only the PVCs have been left in place.
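-
-To confirm which PVCs remain before configuring the new cluster, a quick check like the following can help (a sketch only; the `pg-cluster` label is the PGO v4 label discussed under *Considerations* below, run in whichever namespace the v4 cluster used):
-
-```
-kubectl get pvc --selector=pg-cluster=oldhippo
-```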
-
-**Please note that any differences in configuration or other datasources will alter this procedure significantly and that certain storage options require additional steps (see *Considerations* below)!**
-
-In a standard PGO v4.7 cluster, a primary database pod with a separate pg_wal PVC will mount its pgData PVC, named "oldhippo", at `/pgdata` and its pg_wal PVC, named "oldhippo-wal", at `/pgwal` within the pod's file system. In this pod, the standard pgData directory will be `/pgdata/oldhippo` and the standard pg_wal directory will be `/pgwal/oldhippo-wal`. The pgBackRest repo pod will mount its PVC at `/backrestrepo` and the repo directory will be `/backrestrepo/oldhippo-backrest-shared-repo`.
-
-With the above in mind, we need to reference the three PVCs we wish to migrate in the `dataSource.volumes` portion of the PostgresCluster spec. Additionally, to accommodate the PGO v5 file structure, we must also reference the pgData and pgBackRest repo directories. Note that the pg_wal directory does not need to be moved when migrating from v4 to v5!
-
-Now, we just need to populate our CRD with the information described above:
-
-```
-spec:
-  dataSource:
-    volumes:
-      pgDataVolume:
-        pvcName: oldhippo
-        directory: oldhippo
-      pgWALVolume:
-        pvcName: oldhippo-wal
-      pgBackRestVolume:
-        pvcName: oldhippo-pgbr-repo
-        directory: oldhippo-backrest-shared-repo
-```
-
-Lastly, it is very important that the PostgreSQL version and storage configuration in your PostgresCluster *exactly* match the existing volumes being used.
-
-If the volumes were used with PostgreSQL 13, the `spec.postgresVersion` value should be `13` and the associated `spec.image` value should refer to a PostgreSQL 13 image.
-
-Similarly, the configured data volume definitions in your PostgresCluster spec should match your existing volumes. For example, if the existing pgData PVC has a RWO access mode and is 1 Gigabyte, the relevant `dataVolumeClaimSpec` should be configured as
-
-```
-dataVolumeClaimSpec:
-  accessModes:
-  - "ReadWriteOnce"
-  resources:
-    requests:
-      storage: 1G
-```
-
-With the above configuration in place, your existing PVCs will be used when creating your PostgresCluster. They will be given appropriate Labels and ownership references, and the necessary directory updates will be made so that your cluster is able to find the existing directories.
-
-## Considerations
-
-### Removing PGO v4 labels
-
-When migrating data volumes from v4 to v5, PGO relabels all volumes for PGO v5, but **will not remove existing PGO v4 labels**. This results in PVCs that are labeled for both PGO v4 and v5, which can lead to unintended behavior.
-
-To avoid that, you must manually remove the `pg-cluster` and `vendor` labels, which you can do with a `kubectl` command. For instance, given a cluster named `hippo` with a dedicated pgBackRest repo, the PVC will be `hippo-pgbr-repo`, and the PGO v4 labels can be removed with the below command:
-
-```
-kubectl label pvc hippo-pgbr-repo \
-  pg-cluster- \
-  vendor-
-```
-
-### Proper file permissions for certain storage options
-
-Additional steps are required to set proper file permissions when using certain storage options, such as NFS and HostPath storage, due to a known issue with how fsGroups are applied.
-
-When migrating from PGO v4, this will require the user to manually set the group value of the pgBackRest repo directory, and all subdirectories, to `26` to match the `postgres` group used in PGO v5. Please see [here](https://github.com/kubernetes/examples/issues/260) for more information.
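-
-For example, a sketch of that change (run wherever the repo PVC is mounted, such as a utility Pod or directly on the NFS host; adjust the path to your environment):
-
-```
-chgrp -R 26 /backrestrepo/oldhippo-backrest-shared-repo
-```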
-
-### Additional Considerations
-
-- An existing pg_wal volume is not required when the pg_wal directory is located on the same PVC as the pgData directory.
-- When using existing pg_wal volumes, an existing pgData volume **must** also be defined to ensure consistent naming and proper bootstrapping.
-- When migrating from PGO v4 volumes, it is recommended to use the most recently available version of PGO v4.
-- As there are many factors that may impact this procedure, it is strongly recommended that a test run be completed beforehand to ensure successful operation.
-
-## Putting it all together
-
-Now that we've identified all of our volumes and required directories, we're ready to create our new cluster!
-
-Below is a complete PostgresCluster that includes everything we've talked about. After your `PostgresCluster` is created, you should remove the `spec.dataSource.volumes` section.
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: oldhippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  dataSource:
-    volumes:
-      pgDataVolume:
-        pvcName: oldhippo
-        directory: oldhippo
-      pgWALVolume:
-        pvcName: oldhippo-wal
-      pgBackRestVolume:
-        pvcName: oldhippo-pgbr-repo
-        directory: oldhippo-backrest-shared-repo
-  instances:
-    - name: instance1
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1G
-      walVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1G
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1G
-```
diff --git a/docs/content/guides/extension-management.md b/docs/content/guides/extension-management.md
deleted file mode 100644
index 8d84277d10..0000000000
--- a/docs/content/guides/extension-management.md
+++ /dev/null
@@ -1,120 +0,0 @@
----
-title: "Extension Management"
-date:
-draft: false
-weight: 175
----
-
-[Extensions](https://www.postgresql.org/docs/current/external-extensions.html) combine functions, data types, casts, etc. -- everything you need to add some new feature to PostgreSQL in an easy-to-install package. How easy to install? For many extensions, like the `fuzzystrmatch` extension, it's as easy as connecting to the database and running a command like this:
-
-```
-CREATE EXTENSION fuzzystrmatch;
-```
-
-However, in other cases, an extension might require additional configuration management. PGO lets you add those configurations to the `PostgresCluster` spec easily.
-
-PGO also allows you to add a custom database initialization script in case you would like to automate how and where the extension is installed.
-
-This guide will walk through adding custom configuration for an extension and automating installation, using the example of Crunchy Data's own `pgnodemx` extension.
-
-- [pgnodemx](#pgnodemx)
-
-## `pgnodemx`
-
-[`pgnodemx`](https://github.com/CrunchyData/pgnodemx) is a PostgreSQL extension that is able to pull container-specific metrics (e.g. CPU utilization, memory consumption) from the container itself via SQL queries.
-
-In order to do this, `pgnodemx` requires information from the Kubernetes [DownwardAPI](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/) to be mounted on the PostgreSQL pods.
-Please see the `pgnodemx and the DownwardAPI` section of the [backup architecture]({{< relref "architecture/backups.md" >}}) page for more information on where and how the DownwardAPI is mounted.
-
-### `pgnodemx` Configuration
-
-To enable the `pgnodemx` extension, we need to set certain configurations. Luckily, this can all be done directly through the spec:
-
-```yaml
-spec:
-  patroni:
-    dynamicConfiguration:
-      postgresql:
-        parameters:
-          shared_preload_libraries: pgnodemx
-          pgnodemx.kdapi_enabled: on
-          pgnodemx.kdapi_path: /etc/database-containerinfo
-```
-
-Those three settings will
-
-* load `pgnodemx` at start;
-* enable the `kdapi` functions (which are specific to the capture of Kubernetes DownwardAPI information);
-* tell `pgnodemx` where those DownwardAPI files are mounted (at the `/etc/database-containerinfo` path).
-
-If you create a `PostgresCluster` with those configurations, you will be able to connect, create the extension in a database, and run the functions installed by that extension:
-
-```
-CREATE EXTENSION pgnodemx;
-SELECT * FROM proc_diskstats();
-```
-
-### Automating `pgnodemx` Creation
-
-Now that you know how to configure `pgnodemx`, let's say you want to automate the creation of the extension in a particular database, or in all databases. We can do that through a custom database initialization.
-
-First, we have to create a ConfigMap with the initialization SQL. Let's start with the case where we want `pgnodemx` created for us in the `hippo` database. Our initialization SQL file might be named `init.sql` and look like this:
-
-```
-\c hippo\\
-CREATE EXTENSION pgnodemx;
-```
-
-Now we create the ConfigMap from that file in the same namespace as our PostgresCluster will be created:
-
-```shell
-kubectl create configmap hippo-init-sql -n postgres-operator --from-file=init.sql=path/to/init.sql
-```
-
-You can check that the ConfigMap was created and has the right information:
-
-```shell
-kubectl get configmap -n postgres-operator hippo-init-sql -o yaml
-
-apiVersion: v1
-data:
-  init.sql: |-
-    \c hippo\\
-    CREATE EXTENSION pgnodemx;
-kind: ConfigMap
-metadata:
-  name: hippo-init-sql
-  namespace: postgres-operator
-```
-
-Now, in addition to the spec changes we made above to allow `pgnodemx` to run, we add that ConfigMap's information to the PostgresCluster spec: the name of the ConfigMap (`hippo-init-sql`) and the key for the data (`init.sql`):
-
-```yaml
-spec:
-  databaseInitSQL:
-    key: init.sql
-    name: hippo-init-sql
-```
-
-Apply that spec to a new or existing PostgresCluster, and the pods should spin up with `pgnodemx` already installed in the `hippo` database.
diff --git a/docs/content/guides/huge-pages.md b/docs/content/guides/huge-pages.md
deleted file mode 100644
index 7dce29b6d8..0000000000
--- a/docs/content/guides/huge-pages.md
+++ /dev/null
@@ -1,83 +0,0 @@
----
-title: "Huge Pages"
-date:
-draft: false
-weight: 100
----
-
-# Huge Pages
-
-Huge Pages, a.k.a. "Super Pages" or "Large Pages", are larger chunks of memory that can speed up your system. Normally, the chunks of memory, or "pages", used by the CPU are 4kB in size. The more memory a process needs, the more pages the CPU needs to manage. By using larger pages, the CPU can manage fewer pages and increase its efficiency. For this reason, it is generally recommended to use Huge Pages with your Postgres databases.
-
-# Configuring Huge Pages with PGO
-
-To turn Huge Pages on with PGO, you first need to have Huge Pages turned on at the OS level.
-This means having them enabled, and a specific number of pages preallocated, on the node(s) where you plan to schedule your pods. All processes that run on a given node and request Huge Pages will be sharing this pool of pages, so it is important to allocate enough pages for all the different processes to get what they need. This system/kube-level configuration is outside the scope of this document, since the way that Huge Pages are configured at the OS/node level is dependent on your Kube environment. Consult your Kube environment documentation and any IT support you have for assistance with this step.
-
-When you enable Huge Pages in your Kube cluster, it is important to keep a few things in mind during the rest of the configuration process:
-
-1. What size of Huge Pages are enabled? If there are multiple sizes enabled, which one is the default? Which one do you want Postgres to use?
-2. How many pages were preallocated? Are there any other applications or processes that will be using these pages?
-3. Which nodes have Huge Pages enabled? Is it possible that more nodes will be added to the cluster? If so, will they also have Huge Pages enabled?
-
-Once Huge Pages are enabled on one or more nodes in your Kubernetes cluster, you can tell Postgres to start using them by adding some configuration to your PostgresCluster spec:
-
-{{% notice warning %}}
-Warning: setting/changing this setting will cause your database to restart.
-{{% /notice %}}
-
-```yaml
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      resources:
-        limits:
-          hugepages-2Mi: 16Mi
-          memory: 4Gi
-```
-
-This is where it is important to know the size and the number of Huge Pages available. In the spec above, the `hugepages-2Mi` line indicates that we want to use 2MiB sized pages. If your system only has 1GiB sized pages available, then you will want to use `hugepages-1Gi` as the setting instead. The value after it, `16Mi` in our example, determines the amount of Huge Page memory to be allocated to this Postgres instance. If you have multiple instances, you will need to enable/allocate Huge Pages on an instance-by-instance basis. Keep in mind that if you have a "Highly Available" cluster, meaning you have multiple replicas, each replica will also request Huge Pages. You therefore need to be cognizant of the total amount of Huge Pages available on the node(s) and the amount your cluster is requesting. If you request more pages than are available, you might see some replicas/instances fail to start.
-
-Note: In the `instances.#.resources` spec, there are `limits` and `requests`. If a request value is not specified (like in the example above), it is presumed to be equal to the limit value. For Huge Pages, the request value must always be equal to the limit value; it is therefore perfectly acceptable to just specify it in the `limits` section.
-
-Note: Postgres uses the system default size by default. This means that if there are multiple sizes of Huge Pages available on the node(s) and you attempt to use a size in your PostgresCluster that is not the system default, it will fail. To use a non-default size, you will need to tell Postgres the size to use with the `huge_page_size` variable, which can be set via dynamic configuration:
-
-{{% notice warning %}}
-Warning: setting/changing this parameter will cause your database to restart.
-{{% /notice %}}
-
-```yaml
-patroni:
-  dynamicConfiguration:
-    postgresql:
-      parameters:
-        huge_page_size: 1GB
-```
-
-# The Kubernetes Issue
-
-There is a known issue in Kubernetes where, essentially, if Huge Pages are available on a node, the node tells the processes running in its pods that Huge Pages are available even if a pod has not actually requested any. This is a problem because, by default, Postgres is set to "try" to use Huge Pages. When Postgres is led to believe that Huge Pages are available and attempts to use them, only to find that the pod doesn't actually have any Huge Pages allocated since they were never requested, Postgres will fail.
-
-We have worked around this issue by setting `huge_pages = off` in our newest Crunchy Postgres images. PGO will automatically turn `huge_pages` back to `try` whenever Huge Pages are requested in the resources spec. Those who were already happily using Huge Pages will be unaffected, and those who were not using Huge Pages, but were attempting to run their Postgres containers on nodes that have Huge Pages enabled, will no longer see their databases crash.
-
-The only dilemma that remains is that those whose PostgresClusters are not using Huge Pages, but are running on nodes that have Huge Pages enabled, will see their `shared_buffers` set to their lowest possible setting. This is due to the way that Postgres' `initdb` works when bootstrapping a database. There are a few ways to work around this issue:
-
-1. Use Huge Pages! You're already running your Postgres containers on nodes that have Huge Pages enabled, why not use them in Postgres?
-2. Create nodes in your Kubernetes cluster that don't have Huge Pages enabled, and put your Postgres containers on those nodes.
-3. If for some reason you cannot use Huge Pages in Postgres, but you must run your Postgres containers on nodes that have Huge Pages enabled, you can manually set the `shared_buffers` parameter back to a good setting using dynamic configuration:
-
-{{% notice warning %}}
-Warning: setting/changing this parameter will cause your database to restart.
-{{% /notice %}}
-
-```yaml
-patroni:
-  dynamicConfiguration:
-    postgresql:
-      parameters:
-        shared_buffers: 128MB
-```
diff --git a/docs/content/guides/logical-replication.md b/docs/content/guides/logical-replication.md
deleted file mode 100644
index 649db6ae7b..0000000000
--- a/docs/content/guides/logical-replication.md
+++ /dev/null
@@ -1,173 +0,0 @@
----
-title: "Logical Replication"
-date:
-draft: false
-weight: 150
----
-
-[Logical replication](https://www.postgresql.org/docs/current/logical-replication.html) is a Postgres feature that provides a convenient way for moving data between databases, particularly Postgres clusters that are in an active state.
-
-You can set up your PGO managed Postgres clusters to use logical replication. This guide provides an example of how to do so.
-
-## Set Up Logical Replication
-
-This example creates two separate Postgres clusters named `hippo` and `rhino`. We will logically replicate data from `rhino` to `hippo`.
-We can create these two Postgres clusters using the manifests below:
-
-```
----
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
----
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: rhino
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-  users:
-    - name: logic
-      databases:
-        - zoo
-      options: "REPLICATION"
-```
-
-The key difference between the two Postgres clusters is this section in the `rhino` manifest:
-
-```
-users:
-  - name: logic
-    databases:
-      - zoo
-    options: "REPLICATION"
-```
-
-This creates a database called `zoo` and a user named `logic` with `REPLICATION` privileges. This will allow for replicating data logically to the `hippo` Postgres cluster.
-
-Create these two Postgres clusters. When the `rhino` cluster is ready, [log into the `zoo` database]({{< relref "tutorial/connect-cluster.md" >}}). For convenience, you can use the `kubectl exec` method of logging in:
-
-```
-kubectl exec -it -n postgres-operator -c database \
-  $(kubectl get pods -n postgres-operator --selector='postgres-operator.crunchydata.com/cluster=rhino,postgres-operator.crunchydata.com/role=master' -o name) -- psql zoo
-```
-
-Let's create a simple table called `abc` that contains just integer data. We will also populate this table:
-
-```
-CREATE TABLE abc (id int PRIMARY KEY);
-INSERT INTO abc SELECT * FROM generate_series(1,10);
-```
-
-We need to grant `SELECT` privileges to the `logic` user in order for it to perform an initial data synchronization during logical replication. You can do so with the following command:
-
-```
-GRANT SELECT ON abc TO logic;
-```
-
-Finally, create a [publication](https://www.postgresql.org/docs/current/logical-replication-publication.html) that allows for the replication of data from `abc`:
-
-```
-CREATE PUBLICATION zoo FOR ALL TABLES;
-```
-
-Quit out of the `rhino` Postgres cluster.
-
-For the next step, you will need to get the connection information for how to connect as the `logic` user to the `rhino` Postgres database. You can get the key information from the following commands, which return the hostname, username, and password:
-
-```
-kubectl -n postgres-operator get secrets rhino-pguser-logic -o jsonpath={.data.host} | base64 -d
-kubectl -n postgres-operator get secrets rhino-pguser-logic -o jsonpath={.data.user} | base64 -d
-kubectl -n postgres-operator get secrets rhino-pguser-logic -o jsonpath={.data.password} | base64 -d
-```
-
-The host will be something like `rhino-primary.postgres-operator.svc` and the user will be `logic`. Further down, the guide references the password as `<password>`. You can substitute the actual password there.
-
-Log into the `hippo` Postgres cluster.
-Note that we are logging into the `postgres` database within the `hippo` cluster:
-
-```
-kubectl exec -it -n postgres-operator -c database \
-  $(kubectl get pods -n postgres-operator --selector='postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/role=master' -o name) -- psql
-```
-
-Create a table called `abc` that is identical to the table in the `rhino` database:
-
-```
-CREATE TABLE abc (id int PRIMARY KEY);
-```
-
-Finally, create a [subscription](https://www.postgresql.org/docs/current/logical-replication-subscription.html) that will manage the data replication from `rhino` into `hippo`:
-
-```
-CREATE SUBSCRIPTION zoo
-  CONNECTION 'host=rhino-primary.postgres-operator.svc user=logic dbname=zoo password=<password>'
-  PUBLICATION zoo;
-```
-
-In a few moments, you should see the data replicated into your table:
-
-```
-TABLE abc;
-```
-
-which yields:
-
-```
- id
-----
-  1
-  2
-  3
-  4
-  5
-  6
-  7
-  8
-  9
- 10
-(10 rows)
-```
-
-You can further test that logical replication is working by modifying the data on `rhino` in the `abc` table, and then verifying that it is replicated into `hippo`.
diff --git a/docs/content/guides/major-postgres-version-upgrade.md b/docs/content/guides/major-postgres-version-upgrade.md
deleted file mode 100644
index da63a1a31f..0000000000
--- a/docs/content/guides/major-postgres-version-upgrade.md
+++ /dev/null
@@ -1,177 +0,0 @@
----
-title: "Postgres Major Version Upgrade"
-date:
-draft: false
-weight: 100
----
-
-You can perform a PostgreSQL major version upgrade declaratively using PGO! The below guide will show you how you can upgrade Postgres to a newer major version. For minor updates, i.e. applying a bug fix release, you can follow the [applying software updates]({{< relref "/tutorial/update-cluster.md" >}}) guide in the [tutorial]({{< relref "/tutorial/_index.md" >}}).
-
-Note that major version upgrades are **permanent**: you cannot roll back a major version upgrade through declarative management at this time. If this is an issue, we recommend keeping a copy of your Postgres cluster running your previous version of Postgres.
-
-{{% notice warning %}}
-**Please note the following prior to performing a PostgreSQL major version upgrade:**
-- Any Postgres cluster being upgraded must be in a healthy state in order for the upgrade to complete successfully. If the cluster is experiencing issues such as Pods that are not running properly, or any other similar problems, those issues must be addressed before proceeding.
-- Major PostgreSQL version upgrades of PostGIS clusters are not currently supported.
-{{% /notice %}}
-
-## Step 1: Take a Full Backup
-
-Before starting your major upgrade, you should take a new full [backup]({{< relref "tutorial/backup-management.md" >}}) of your data. This adds another layer of protection in cases where the upgrade process does not complete as expected.
-
-At this point, your running cluster is ready for the major upgrade.
-
-## Step 2: Configure the Upgrade Parameters through a PGUpgrade object
-
-The next step is to create a `PGUpgrade` resource. This is the resource that tells the PGO-Upgrade controller which cluster to upgrade, what version to upgrade from, and what version to upgrade to. There are other optional fields to fill in as well, such as `Resources` and `Tolerations`; to learn more about these optional fields, check out the [Upgrade CRD API]({{< relref "references/crd.md" >}}).
-
-For instance, if you have a Postgres cluster named `hippo` running PG {{< param fromPostgresVersion >}} but want to upgrade it to PG {{< param postgresVersion >}}, the corresponding `PGUpgrade` manifest would look like this:
-
-```yaml
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PGUpgrade
-metadata:
-  name: hippo-upgrade
-spec:
-  image: {{< param imageCrunchyPGUpgrade >}}
-  postgresClusterName: hippo
-  fromPostgresVersion: {{< param fromPostgresVersion >}}
-  toPostgresVersion: {{< param postgresVersion >}}
-```
-
-The `postgresClusterName` gives the name of the target Postgres cluster to upgrade and `toPostgresVersion` gives the version to update to. It may seem unnecessary to include the `fromPostgresVersion`, but that is one of the safety checks we have built into the upgrade process: in order to successfully upgrade a Postgres cluster, you have to know what version you mean to be upgrading from.
-
-One very important thing to note: upgrade objects should be made in the same namespace as the Postgres cluster that you mean to upgrade. For security, the PGO-Upgrade controller does not allow for cross-namespace processes.
-
-If you look at the status of the `PGUpgrade` object at this point, you should see a condition saying this:
-
-```
-type: "progressing",
-status: "false",
-reason: "PGClusterNotShutdown",
-message: "PostgresCluster instances still running",
-```
-
-What that means is that the upgrade process is blocked because the cluster is not yet shut down. We are stuck ("progressing" is false) until we shut down the cluster. So let's go ahead and do that now.
-
-## Step 3: Shutdown and Annotate the Cluster
-
-In order to kick off the upgrade process, you need to shut down the cluster and add an annotation to the cluster signalling which PGUpgrade to run.
-
-Why do we need to add an annotation to the cluster if the PGUpgrade already has the cluster's name? This is another security mechanism--think of it as a two-key nuclear system: the `PGUpgrade` has to know which Postgres cluster to upgrade, and the Postgres cluster has to allow this upgrade to work on it.
-
-The annotation to add is `postgres-operator.crunchydata.com/allow-upgrade`, with the name of the `PGUpgrade` object as the value. So for our example above with a Postgres cluster named `hippo` and a `PGUpgrade` object named `hippo-upgrade`, we could annotate the cluster with the command
-
-```bash
-kubectl -n postgres-operator annotate postgrescluster hippo postgres-operator.crunchydata.com/allow-upgrade="hippo-upgrade"
-```
-
-To shut down the cluster, set the `spec.shutdown` field to `true` and reapply the spec with `kubectl`. For example, if you used the [tutorial]({{< relref "tutorial/_index.md" >}}) to [create your Postgres cluster]({{< relref "tutorial/create-cluster.md" >}}), you would run the following command:
-
-```
-kubectl -n postgres-operator apply -k kustomize/postgres
-```
-
-(Note: you could also change the annotation at the same time as you shut down the cluster; the purpose of demonstrating how to annotate was primarily to show what the label would look like.)
-
-## Step 4: Watch and wait
-
-When the last Postgres Pod is terminated, the PGO-Upgrade process will kick into action, upgrading the primary database and preparing the replicas. If you are watching the namespace, you will see the PGUpgrade controller start Pods for each of those actions. But you don't have to watch the namespace to keep track of the upgrade process.
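-
-For instance (a sketch; `hippo-upgrade` is the `PGUpgrade` object created above), you can inspect the object directly and read its conditions, as described next:
-
-```bash
-kubectl -n postgres-operator describe pgupgrade hippo-upgrade
-```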
-
-To keep track of the process and see when it finishes, you can look at the `status.conditions` field of the `PGUpgrade` object. If the upgrade process encounters any blockers preventing it from finishing, the `status.conditions` field will report on those blockers. When it finishes upgrading the cluster, it will show the status conditions:
-
-```
-type: "Progressing"
-status: "false"
-reason: "PGUpgradeCompleted"
-
-type: "Succeeded"
-status: "true"
-reason: "PGUpgradeSucceeded"
-```
-
-You can also check the Postgres cluster itself to see when the upgrade has completed. When the upgrade is complete, the cluster will show the new version in its `status.postgresVersion` field.
-
-If the process encounters any errors, the upgrade will stop to prevent data loss, and the `PGUpgrade` object will report the failure in its status. For more specifics about the failure, you can check the logs of the individual Pods that were doing the upgrade jobs.
-
-## Step 5: Restart your Postgres cluster with the new version
-
-Once the upgrade process is complete, you can delete the `PGUpgrade` object, which will clean up any Jobs and Pods that were created during the upgrade. But as long as the process completed successfully, that `PGUpgrade` object will remain inert. If you find yourself needing to upgrade the cluster again, you will not be able to edit the existing `PGUpgrade` object with the new versions, but will have to create a new `PGUpgrade` object. Again, this is a safety mechanism to make sure that any PGUpgrade can only be run once.
-
-Likewise, you may remove the annotation on the Postgres cluster as part of the cleanup. While not necessary, it is recommended to leave your cluster without unnecessary annotations.
-
-To restart your newly upgraded Postgres cluster, you will have to update the `spec.postgresVersion` to the new version. You may also have to update the `spec.image` value to reflect the image you plan to use if that field is already filled in. Set `spec.shutdown` to `false`, and PGO will restart your cluster:
-
-```
-spec:
-  shutdown: false
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-```
-
-{{% notice warning %}}
-Setting and applying the `postgresVersion` or `image` values before the upgrade will result in the upgrade process being rejected.
-{{% /notice %}}
-
-## Step 6: Complete the Post-Upgrade Tasks
-
-After the upgrade Job has completed, there will be some amount of post-upgrade processing that needs to be done. During the upgrade process, the upgrade Job, via [`pg_upgrade`](https://www.postgresql.org/docs/current/pgupgrade.html), will issue warnings and possibly create scripts to perform post-upgrade tasks. You can see the full output of the upgrade Job by running a command similar to this:
-
-```
-kubectl -n postgres-operator logs hippo-pgupgrade-abcd
-```
-
-While the scripts are placed on the Postgres data PVC, you may not have access to them. The information below describes what each script does and how you can execute them.
-
-In Postgres 13 and older, `pg_upgrade` creates a script called `analyze_new_cluster.sh` to perform a post-upgrade analyze using [`vacuumdb`](https://www.postgresql.org/docs/current/app-vacuumdb.html) on the database.
-
-The script provides two ways of doing so:
-
-```
-vacuumdb --all --analyze-in-stages
-```
-
-or
-
-```
-vacuumdb --all --analyze-only
-```
-
-Note that these commands need to be run as a Postgres superuser (e.g. `postgres`).
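-
-For example, assuming you can exec into the primary Pod of the `hippo` cluster and that `vacuumdb` is available in the `database` container, the staged analyze could be run like this (a sketch that reuses the selector from the commands above):
-
-```
-kubectl -n postgres-operator exec -it -c database \
-  $(kubectl -n postgres-operator get pods --selector='postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/role=master' -o name) -- vacuumdb --all --analyze-in-stages
-```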
For more information on the difference between the options, please see the documentation for [`vacuumdb`](https://www.postgresql.org/docs/current/app-vacuumdb.html).
-
-If you are unable to exec into the Pod, you can run `ANALYZE` directly on each of your databases.
-
-`pg_upgrade` may also create a script called `delete_old_cluster.sh`, which contains the equivalent of
-
-```
-rm -rf '/pgdata/pg{{< param fromPostgresVersion >}}'
-```
-
-When you are satisfied with the upgrade, you can execute this command to remove the old data directory. Do so at your discretion.
-
-Note that the `delete_old_cluster.sh` script does not delete the old WAL files. These are typically found in `/pgdata/pg{{< param fromPostgresVersion >}}_wal`, although they can be stored elsewhere. If you would like to delete these files, this must be done manually.
-
-If you have extensions installed, you may need to upgrade those as well. For example, for the `pgaudit` extension we recommend running the following to upgrade:
-
-```sql
-DROP EXTENSION pgaudit;
-CREATE EXTENSION pgaudit;
-```
-
-`pg_upgrade` may also create a file called `update_extensions.sql` to facilitate extension upgrades. Be aware that some of the recommended ways to upgrade may be outdated.
-
-Please carefully review the `update_extensions.sql` file before you run it, and if you want to upgrade `pgaudit` via this file, update the file with the above commands for `pgaudit` prior to execution. We recommend verifying all extension updates from this file with the appropriate extension documentation and their recommendation for upgrading the extension prior to execution. After you update the file, you can execute this script using `kubectl exec`, e.g.
-
-```
-$ kubectl -n postgres-operator exec -it -c database \
-  $(kubectl -n postgres-operator get pods --selector='postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/role=master' -o name) -- psql -f /pgdata/update_extensions.sql
-```
-
-If you cannot exec into your Pod, you can also manually run these commands as a Postgres superuser.
-
-Ensure the execution of this and any other SQL scripts completes successfully; otherwise, your data may be unavailable.
-
-Once this is done, your major upgrade is complete! Enjoy using your newer version of Postgres!
diff --git a/docs/content/guides/private-registries.md b/docs/content/guides/private-registries.md
deleted file mode 100644
index 54f8bb481c..0000000000
--- a/docs/content/guides/private-registries.md
+++ /dev/null
@@ -1,144 +0,0 @@
----
-title: "Private Registries"
-date:
-draft: false
-weight: 200
----
-
-PGO, the open source Postgres Operator, can use containers that are stored in private registries. There are a variety of techniques that are used to load containers from private registries, including [image pull secrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). This guide will demonstrate how to install PGO and deploy a Postgres cluster using the [Crunchy Data Customer Portal](https://access.crunchydata.com/) registry as an example.
-
-## Create an Image Pull Secret
-
-The Kubernetes documentation provides several methods for creating [image pull secrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). You can choose the method that is most appropriate for your installation. You will need to create image pull secrets in the namespace where PGO is deployed and in each namespace where you plan to deploy Postgres clusters.
-
-For example, to create an image pull secret for accessing the Crunchy Data Customer Portal image registry in the `postgres-operator` namespace, you can execute the following commands:
-
-```shell
-kubectl create ns postgres-operator
-
-kubectl create secret docker-registry crunchy-regcred -n postgres-operator \
-  --docker-server=registry.crunchydata.com \
-  --docker-username=<YOUR_USERNAME> \
-  --docker-email=<YOUR_EMAIL> \
-  --docker-password=<YOUR_PASSWORD>
-```
-
-This creates an image pull secret named `crunchy-regcred` in the `postgres-operator` namespace.
-
-## Install PGO from a Private Registry
-
-To [install PGO]({{< relref "installation/_index.md" >}}) from a private registry, you will need to set an image pull secret on the installation manifest.
-
-For example, to set up an image pull secret using the [Kustomize install method]({{< relref "installation/_index.md" >}}) to install PGO from the [Crunchy Data Customer Portal](https://access.crunchydata.com/), you can set the following in the `kustomize/install/default/kustomization.yaml` manifest:
-
-```yaml
-images:
-- name: postgres-operator
-  newName: {{< param operatorRepositoryPrivate >}}
-  newTag: {{< param postgresOperatorTag >}}
-
-patchesJson6902:
-  - target:
-      group: apps
-      version: v1
-      kind: Deployment
-      name: pgo
-    patch: |-
-      - op: remove
-        path: /spec/selector/matchLabels/app.kubernetes.io~1name
-      - op: remove
-        path: /spec/selector/matchLabels/app.kubernetes.io~1version
-      - op: add
-        path: /spec/template/spec/imagePullSecrets
-        value:
-          - name: crunchy-regcred
-```
-
-If you are using a version of `kubectl` prior to `v1.21.0`, you will have to create an explicit patch file named `install-ops.yaml`:
-
-```yaml
-- op: remove
-  path: /spec/selector/matchLabels/app.kubernetes.io~1name
-- op: remove
-  path: /spec/selector/matchLabels/app.kubernetes.io~1version
-- op: add
-  path: /spec/template/spec/imagePullSecrets
-  value:
-    - name: crunchy-regcred
-```
-
-and modify the manifest to be the following:
-
-```yaml
-images:
-- name: postgres-operator
-  newName: {{< param operatorRepositoryPrivate >}}
-  newTag: {{< param postgresOperatorTag >}}
-
-patchesJson6902:
-  - target:
-      group: apps
-      version: v1
-      kind: Deployment
-      name: pgo
-    path: install-ops.yaml
-```
-
-You can then install PGO from the private registry using the standard installation procedure, e.g.:
-
-```shell
-kubectl apply --server-side -k kustomize/install/default
-```
-
-## Deploy a Postgres cluster from a Private Registry
-
-To deploy a Postgres cluster using images from a private registry, you will need to set the value of `spec.imagePullSecrets` on a `PostgresCluster` custom resource.
-
-For example, to deploy a Postgres cluster using images from the [Crunchy Data Customer Portal](https://access.crunchydata.com/) with an image pull secret in the `postgres-operator` namespace, you can use the following manifest:
-
-```yaml
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  imagePullSecrets:
-    - name: crunchy-regcred
-  image: {{< param imageCrunchyPostgresPrivate >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrestPrivate >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-```
diff --git a/docs/content/guides/storage-retention.md b/docs/content/guides/storage-retention.md
deleted file mode 100644
index 12c5782693..0000000000
--- a/docs/content/guides/storage-retention.md
+++ /dev/null
@@ -1,230 +0,0 @@
----
-title: "Storage Retention"
-date:
-draft: false
-weight: 125
----
-
-PGO uses [persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) to store Postgres data and, based on your configuration, data for backups, archives, etc. There are cases where you may want to retain your volumes for [later use]({{< relref "./data-migration.md" >}}).
-
-The guide below shows how to configure your persistent volumes (PVs) to remain after a Postgres cluster managed by PGO is deleted and to deploy the retained PVs to a new Postgres cluster.
-
-For the purposes of this exercise, we will use a Postgres cluster named `hippo`.
-
-## Modify Persistent Volume Retention
-
-Retention of persistent volumes is set using a [reclaim policy](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming). By default, most persistent volumes have a policy of `Delete`, which removes any data on a persistent volume once there are no more persistent volume claims (PVCs) associated with it.
-
-To retain a persistent volume, you will need to set the reclaim policy to `Retain`. Note that persistent volumes are cluster-wide objects, so you will need appropriate permissions to be able to modify a persistent volume.
-
-To retain the persistent volume associated with your Postgres database, you must first determine which persistent volume is associated with the persistent volume claim for your database. First, locate the persistent volume claim. For example, with the `hippo` cluster, you can do so with the following command:
-
-```
-kubectl get pvc -n postgres-operator --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/data=postgres
-```
-
-This will yield something similar to the below, which are the PVCs associated with any Postgres instance:
-
-```
-NAME                          STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
-hippo-instance1-x9vq-pgdata   Bound    pvc-aef7ee64-4495-4813-b896-8a67edc53e58   1Gi        RWO            standard       6m53s
-```
-
-The `VOLUME` column contains the name of the persistent volume.
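-
-If you want to capture that volume name in a shell variable for the commands that follow, a `jsonpath` query is one way to do it (a sketch, reusing the same selector as above):
-
-```
-PV_NAME=$(kubectl get pvc -n postgres-operator \
-  --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/data=postgres \
-  -o jsonpath='{.items[0].spec.volumeName}')
-```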
You can inspect it using `kubectl get pv`, e.g.:
-
-```
-kubectl get pv pvc-aef7ee64-4495-4813-b896-8a67edc53e58
-```
-
-which should yield:
-
-```
-NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                                           STORAGECLASS   REASON   AGE
-pvc-aef7ee64-4495-4813-b896-8a67edc53e58   1Gi        RWO            Delete           Bound    postgres-operator/hippo-instance1-x9vq-pgdata   standard                8m10s
-```
-
-To set the reclaim policy to `Retain`, you can run a command similar to this:
-
-```
-kubectl patch pv pvc-aef7ee64-4495-4813-b896-8a67edc53e58 -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
-```
-
-Verify that the change occurred:
-
-```
-kubectl get pv pvc-aef7ee64-4495-4813-b896-8a67edc53e58
-```
-
-should show that `Retain` is set in the `RECLAIM POLICY` column:
-
-```
-NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                                           STORAGECLASS   REASON   AGE
-pvc-aef7ee64-4495-4813-b896-8a67edc53e58   1Gi        RWO            Retain           Bound    postgres-operator/hippo-instance1-x9vq-pgdata   standard                9m53s
-```
-
-## Delete Postgres Cluster, Retain Volume
-
-{{% notice warning %}}
-**This is a potentially destructive action**. Please be sure that your volume retention is set correctly and/or you have backups in place to restore your data.
-{{% /notice %}}
-
-[Delete your Postgres cluster]({{< relref "tutorial/delete-cluster.md" >}}). You can delete it using the manifest or with a command similar to:
-
-```
-kubectl -n postgres-operator delete postgrescluster hippo
-```
-
-Wait for the Postgres cluster to finish deleting. You should then verify that the persistent volume is still there:
-
-```
-kubectl get pv pvc-aef7ee64-4495-4813-b896-8a67edc53e58
-```
-
-should yield:
-
-```
-NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS     CLAIM                                           STORAGECLASS   REASON   AGE
-pvc-aef7ee64-4495-4813-b896-8a67edc53e58   1Gi        RWO            Retain           Released   postgres-operator/hippo-instance1-x9vq-pgdata   standard                21m
-```
-
-## Create Postgres Cluster With Retained Volume
-
-You can now create a new Postgres cluster with the retained volume. First, to aid the process, you will want to provide a label that is unique for your persistent volumes so we can identify it in the manifest. For example:
-
-```
-kubectl label pv pvc-aef7ee64-4495-4813-b896-8a67edc53e58 pgo-postgres-cluster=postgres-operator-hippo
-```
-
-(This label uses the format `<namespace>-<clusterName>`.)
-
-Next, you will need to reference this persistent volume in your Postgres cluster manifest. For example:
-
-```yaml
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-        selector:
-          matchLabels:
-            pgo-postgres-cluster: postgres-operator-hippo
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-```
-
-Wait for the Pods to come up. You may see the Postgres Pod is in a `Pending` state. You will need to go in and clear the claim on the persistent volume that you want to use for this Postgres cluster, e.g.:
-
-```
-kubectl patch pv pvc-aef7ee64-4495-4813-b896-8a67edc53e58 -p '{"spec":{"claimRef": null}}'
-```
-
-After that, your Postgres cluster will come up and will be using the retained persistent volume!
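-
-You can double-check that the new claim bound to the retained volume by listing the PVCs again and comparing the `VOLUME` column (the same selector as earlier in this guide):
-
-```
-kubectl get pvc -n postgres-operator --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/data=postgres
-```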
-
-If you ultimately want the volume to be deleted, you will need to revert the reclaim policy to `Delete`, e.g.:
-
-```
-kubectl patch pv pvc-aef7ee64-4495-4813-b896-8a67edc53e58 -p '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}'
-```
-
-After doing that, the next time you delete your Postgres cluster, the volume and your data will be deleted.
-
-### Additional Notes on Storage Retention
-
-Systems using "hostpath" storage or a storage class that does not support label selectors may not be able to use the label selector method for using a retained volume. You would have to specify the `volumeName` directly, e.g.:
-
-```yaml
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-        volumeName: "pvc-aef7ee64-4495-4813-b896-8a67edc53e58"
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-```
-
-Additionally, to add more replicas to your Postgres cluster, you will have to make changes to your spec. You can do one of the following:
-
-1. Remove the volume-specific configuration from the volume claim spec (e.g. delete `spec.instances.selector` or `spec.instances.volumeName`)
-
-2. Add a new instance set specifically for your replicas, e.g.:
-
-```yaml
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-        selector:
-          matchLabels:
-            pgo-postgres-cluster: postgres-operator-hippo
-    - name: instance2
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-```
diff --git a/docs/content/guides/tablespaces.md b/docs/content/guides/tablespaces.md
deleted file mode 100644
index 0bfd8ff2d8..0000000000
--- a/docs/content/guides/tablespaces.md
+++ /dev/null
@@ -1,311 +0,0 @@
----
-title: "Tablespaces in PGO"
-date:
-draft: false
-weight: 160
----
-
-{{% notice warning %}}
-PGO tablespaces currently require enabling the `TablespaceVolumes` feature gate
-and may interfere with other features. (See below for more details.)
-{{% /notice %}}
-
-A [Tablespace](https://www.postgresql.org/docs/current/manage-ag-tablespaces.html) is a Postgres feature that is used to store data on a different volume than the primary data directory. While most workloads do not require tablespaces, they can be helpful for larger data sets or for utilizing particular hardware to optimize performance on a specific Postgres object (a table, index, etc.).
Some examples of use cases for tablespaces include:
-
-- Partitioning larger data sets across different volumes
-- Putting data onto archival systems
-- Utilizing faster/more performant hardware (or a storage class) for a particular database
-- Storing sensitive data on a volume that supports transparent data encryption (TDE)
-
-and others.
-
-In order to use Postgres tablespaces properly in a highly-available, distributed system, there are several considerations to ensure proper operations:
-
-- Each tablespace must have its own volume; this means that every tablespace for
-every replica in a system must have its own volume;
-- The available filesystem paths must be consistent on each Postgres pod in a Postgres cluster;
-- The backup & disaster recovery management system must be able to safely back up
-and restore data to tablespaces.
-
-Additionally, a tablespace is a critical piece of a Postgres instance: if Postgres expects a tablespace to exist and the tablespace volume is unavailable, this could trigger a downtime scenario.
-
-While there are certain challenges with creating a Postgres cluster with high-availability along with tablespaces in a Kubernetes-based environment, the Postgres Operator adds many conveniences to make it easier to use tablespaces.
-
-## Enabling TablespaceVolumes in PGO v5
-
-In PGO v5, tablespace support is currently feature-gated. If you want to use this experimental feature, you will need to enable the feature via the PGO `TablespaceVolumes` [feature gate](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/).
-
-PGO feature gates are enabled by setting the `PGO_FEATURE_GATES` environment variable on the PGO Deployment. To enable tablespaces, you would want to set
-
-```
-PGO_FEATURE_GATES="TablespaceVolumes=true"
-```
-
-Please note that it is possible to enable more than one feature at a time as this variable accepts a comma-delimited list. For example, to enable multiple features, you would set `PGO_FEATURE_GATES` like so:
-
-```
-PGO_FEATURE_GATES="FeatureName=true,FeatureName2=true,FeatureName3=true..."
-```
-
-## Adding TablespaceVolumes to a postgrescluster in PGO v5
-
-Once you have enabled `TablespaceVolumes` on your PGO deployment, you can add volumes to a new or existing cluster by adding volumes to the `spec.instances.tablespaceVolumes` field.
-
-A `TablespaceVolume` object has two fields: a name (which is required and used to set the path) and a `dataVolumeClaimSpec`, which describes the storage that your Postgres instance will use for this volume. This field behaves identically to the `dataVolumeClaimSpec` in the `instances` list. For example, you could use the following to create a `postgrescluster`:
-
-```yaml
-spec:
-  instances:
-    - name: instance1
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-      tablespaceVolumes:
-      - name: user
-        dataVolumeClaimSpec:
-          accessModes:
-          - "ReadWriteOnce"
-          resources:
-            requests:
-              storage: 1Gi
-```
-
-In this case, the `postgrescluster` will have 1Gi for the database volume and 1Gi for the tablespace volume, and both will be provisioned by PGO.
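-
-Once the cluster is up, you can confirm that PGO provisioned both claims by listing the PVCs for the cluster (a quick check, assuming the `hippo` cluster name and `postgres-operator` namespace used elsewhere in these docs):
-
-```
-kubectl get pvc -n postgres-operator --selector=postgres-operator.crunchydata.com/cluster=hippo
-```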
-
-But if you were attempting to migrate data from one `postgrescluster` to another, you could re-use pre-existing volumes by passing in some label selector or the `volumeName` into the `tablespaceVolumes.dataVolumeClaimSpec` the same way you would pass that information into the `instances.dataVolumeClaimSpec` field:
-
-```yaml
-spec:
-  instances:
-    - name: instance1
-      dataVolumeClaimSpec:
-        volumeName: pvc-1001c17d-c137-4f78-8505-be4b26136924 # A preexisting volume you want to reuse for PGDATA
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-      tablespaceVolumes:
-      - name: user
-        dataVolumeClaimSpec:
-          accessModes:
-          - "ReadWriteOnce"
-          resources:
-            requests:
-              storage: 1Gi
-          volumeName: pvc-3fea1531-617a-4fff-9032-6487206ce644 # A preexisting volume you want to use for this tablespace
-```
-
-Note: the `name` of the `tablespaceVolume` needs to be
-
-* unique in the instance since that name becomes part of the mount path for that volume;
-* valid as part of a path name, label, and part of a volume name.
-
-There is validation on the CRD for these requirements.
-
-Once you request those `tablespaceVolumes`, PGO takes care of creating (or reusing) those volumes, including mounting them to the pod at a known path (`/tablespaces/NAME`) and adding them to the necessary containers.
-
-### How to use Postgres Tablespaces in PGO v5
-
-After PGO has mounted the volumes at the requested locations, the startup container makes sure that those locations have the appropriate owner and permissions. This behavior mimics the startup behavior behind the `PGDATA` directory, so that when you connect to your cluster, you should be able to start using those tablespaces.
-
-In order to use those tablespaces in Postgres, you will first need to create the tablespace, including the location. As noted above, PGO mounts the requested volumes at `/tablespaces/NAME`. So if you request tablespaces with the names `books` and `authors`, the two volumes will be mounted at `/tablespaces/books` and `/tablespaces/authors`.
-
-However, in order to make sure that the directory has the appropriate ownership so that Postgres can use it, we create a subdirectory called `data` in each volume.
-
-To create a tablespace in Postgres, you will issue a command of the form
-
-```
-CREATE TABLESPACE name LOCATION '/path/to/dir';
-```
-
-So to create a tablespace called `books` in the new `books` volume, your command might look like
-
-```
-CREATE TABLESPACE books LOCATION '/tablespaces/books/data';
-```
-
-To break that path down: `tablespaces` is the mount point for all tablespace volumes; `books` is the name of the volume in the spec; and `data` is a directory created with the appropriate ownership by the startup script.
-
-Once you have
-
-* enabled the `TablespaceVolumes` feature gate,
-* added `tablespaceVolumes` to your cluster spec,
-* and created the tablespace in Postgres,
-
-then you are ready to use tablespaces in your cluster. For example, if you wanted to create a table called `books` on the `books` tablespace, you could execute the following SQL:
-
-```sql
-CREATE TABLE books (
-    book_id varchar(20),  -- Postgres uses varchar, not Oracle's VARCHAR2
-    title varchar(50),
-    author_last_name varchar(30)
-)
-TABLESPACE books;
-```
-
-## Considerations
-
-### Only one pod per volume
-
-As stated above, it is important to ensure that every tablespace has its own volume
-(i.e. its own [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)).
-This is especially true for any replicas in a cluster: you don't want multiple Postgres instances -writing to the same volume. - -So if you have a single named volume in your spec (for either the main PGDATA directory or -for tablespaces), you should not raise the `spec.instances.replicas` field above 1, because if you -did, multiple pods would try to use the same volume. - -### Too-long names? - -Different Kubernetes objects have different limits about the length of their names. For example, -services follow the DNS label conventions: 63 characters or less, lowercase, and alphanumeric with -hyphens U+002D allowed in between. - -Occasionally some PGO-managed objects will go over the limit set for that object type because of -the user-set cluster or instance name. - -We do not anticipate this being a problem with the `PersistentVolumeClaim` created for a tablespace. -The name for a `PersistentVolumeClaim` created by PGO for a tablespace will potentially be long since -the name is a combination of the cluster, the instance, the tablespace, and the `-tablespace` suffix. -However, a `PersistentVolumeClaim` name can be up to 253 characters in length. - -### Same tablespace volume names across replicas - -We want to make sure that every pod has a consistent filesystem because Postgres expects -the same path on each replica. - -For instance, imagine on your primary Postgres, you add a tablespace with the location -`/tablespaces/kafka/data`. If you have a replica attached to that primary, it will likewise -try to add a tablespace at the location `/tablespaces/kafka/data`; and if that location doesn't -exist on the replica's filesystem, Postgres will rightly complain. - -Therefore, if you expand your `postgrescluster` with multiple instances, you will need to make -sure that the multiple instances have `tablespaceVolumes` with the *same names*, like so: - -```yaml -spec: - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - tablespaceVolumes: - - name: user - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - - name: instance2 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - tablespaceVolumes: - - name: user - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -### Tablespace backups - -PGO uses `pgBackRest` as our backup solution, and `pgBackRest` is built to work with tablespaces -natively. That is, `pgBackRest` should back up the entire database, including tablespaces, without -any additional work on your part. - -**Note**: `pgBackRest` does not itself use tablespaces, so all the backups will go to a single volume. -One of the primary uses of tablespaces is to relieve disk pressure by separating the database among -multiple volumes, but if you are running out of room on your `pgBackRest` persistent volume, -tablespaces will not help, and you should first solve your backup space problem. - -### Adding tablespaces to existing clusters - -As with other changes made to the definition of a Postgres pod, adding `tablespaceVolumes` to an -existing cluster may cause downtime. The act of mounting a new PVC to a Kubernetes Deployment -causes the Pods in the deployment to restart. - -### Restoring from a cluster with tablespaces - -This functionality has not been fully tested. Enjoy! - -### Removing tablespaces - -Removing a tablespace is a nontrivial operation. 
Postgres does not provide a
-`DROP TABLESPACE .. CASCADE` command that would drop any associated objects with a tablespace.
-Additionally, the Postgres documentation covering the
-[`DROP TABLESPACE`](https://www.postgresql.org/docs/current/sql-droptablespace.html)
-command goes on to note:
-
-> A tablespace can only be dropped by its owner or a superuser. The tablespace
-> must be empty of all database objects before it can be dropped. It is possible
-> that objects in other databases might still reside in the tablespace even if
-> no objects in the current database are using the tablespace. Also, if the
-> tablespace is listed in the temp_tablespaces setting of any active session,
-> the DROP might fail due to temporary files residing in the tablespace.
-
-Because of this, and to avoid a situation where a Postgres cluster is left in an inconsistent state due to trying to remove a tablespace, PGO does not provide any means to remove tablespaces automatically. If you need to remove a tablespace from a Postgres deployment, we recommend following this procedure:
-
-1. As a database administrator:
-   1. Log into the primary instance of your cluster.
-   1. Drop any objects (tables, indexes, etc.) that reside within the tablespace you wish to delete.
-   1. Delete this tablespace from the Postgres cluster using the `DROP TABLESPACE` command.
-1. As a Kubernetes user who can modify `postgrescluster` specs:
-   1. Remove the `tablespaceVolumes` entries for the tablespaces you wish to remove.
-
-## More Information
-
-For more information on how tablespaces work in Postgres please refer to the
-[Postgres manual](https://www.postgresql.org/docs/current/manage-ag-tablespaces.html).
\ No newline at end of file
diff --git a/docs/content/installation/_index.md b/docs/content/installation/_index.md
deleted file mode 100644
index add5679273..0000000000
--- a/docs/content/installation/_index.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: "Installation"
-date:
-draft: false
-weight: 30
----
-
-This section provides detailed instructions for anything and everything related to installing PGO in your Kubernetes environment. This includes instructions for installing PGO according to a variety of supported installation methods, along with information for customizing the installation of PGO according to your specific needs.
-
-Additionally, instructions are provided for installing and configuring [PGO Monitoring]({{< relref "./monitoring" >}}).
-
-## Installing PGO
-
-- [PGO Kustomize Install]({{< relref "./kustomize.md" >}})
-- [PGO Helm Install]({{< relref "./helm.md" >}})
-
-## Installing PGO Monitoring
-
-- [PGO Monitoring Kustomize Install]({{< relref "./monitoring/kustomize.md" >}})
diff --git a/docs/content/installation/helm.md b/docs/content/installation/helm.md
deleted file mode 100644
index 32781466d2..0000000000
--- a/docs/content/installation/helm.md
+++ /dev/null
@@ -1,156 +0,0 @@
----
-title: "Helm"
-date:
-draft: false
-weight: 20
----
-
-# Installing PGO Using Helm
-
-This section provides instructions for installing and configuring PGO using Helm.
-
-There are two sources for the PGO Helm chart:
-* the Postgres Operator examples repo;
-* the Helm chart hosted on the Crunchy container registry, which supports direct Helm installs.
-
-# The Postgres Operator Examples repo
-
-## Prerequisites
-
-First, go to GitHub and [fork the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository, which contains the PGO Helm installer.
-
-[https://github.com/CrunchyData/postgres-operator-examples/fork](https://github.com/CrunchyData/postgres-operator-examples/fork)
-
-Once you have forked this repo, you can download it to your working environment with a command similar to this:
-
-```
-YOUR_GITHUB_UN="<your GitHub username>"
-git clone --depth 1 "git@github.com:${YOUR_GITHUB_UN}/postgres-operator-examples.git"
-cd postgres-operator-examples
-```
-
-The PGO Helm chart is located in the `helm/install` directory of this repository.
-
-## Configuration
-
-The `values.yaml` file for the Helm chart contains all of the available configuration settings for PGO. The default `values.yaml` settings should work in most Kubernetes environments, but it may require some customization depending on your specific environment and needs.
-
-For instance, it might be necessary to customize the image tags that are used via the `controllerImages` setting:
-
-```yaml
-controllerImages:
-  cluster: {{< param operatorRepository >}}:{{< param postgresOperatorTag >}}
-```
-
-Please note that the `values.yaml` file is located in `helm/install`.
-
-### Logging
-
-By default, PGO deploys with debug logging turned on. If you wish to disable this, you need to set the `debug` attribute in the `values.yaml` to false, e.g.:
-
-```yaml
-debug: false
-```
-
-### Installation Mode
-
-When PGO is installed, it can be configured to manage PostgreSQL clusters in all namespaces within the Kubernetes cluster, or just those within a single namespace. When managing PostgreSQL clusters in all namespaces, a ClusterRole and ClusterRoleBinding are created to ensure PGO has the permissions it requires to properly manage PostgreSQL clusters across all namespaces. However, when PGO is configured to manage PostgreSQL clusters within a single namespace only, a Role and RoleBinding are created instead.
-
-In order to select between these two modes when installing PGO using Helm, the `singleNamespace` setting in the `values.yaml` file can be utilized:
-
-```yaml
-singleNamespace: false
-```
-
-Specifically, if this setting is set to `false` (which is the default), then a ClusterRole and ClusterRoleBinding will be created, and PGO will manage PostgreSQL clusters in all namespaces. However, if this setting is set to `true`, then a Role and RoleBinding will be created instead, allowing PGO to only manage PostgreSQL clusters in the same namespace utilized when installing the PGO Helm chart.
-
-## Install
-
-Once you have configured the Helm chart according to your specific needs, it can then be installed using `helm`:
-
-```shell
-helm install <name> -n <namespace> helm/install
-```
-
-### Automated Upgrade Checks
-
-By default, PGO will automatically check for updates to itself and software components by making a request to a URL. If PGO detects there are updates available, it will print them in the logs. As part of the check, PGO will send aggregated, anonymized information about the current deployment to the endpoint. An upcoming release will allow PGO to opt in to receiving and applying updates to software components automatically.
-
-PGO will check for updates upon startup and once every 24 hours. Any errors in checking will have no impact on PGO's operation. To disable the upgrade check, you can set the `disable_check_for_upgrades` value in the Helm chart to `true`.
-
-For more information about collected data, see the Crunchy Data [collection notice](https://www.crunchydata.com/developers/data-collection-notice).
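-
-For example, an existing release could be switched over with the `--set` flag (a sketch, assuming a release named `pgo` installed into the `postgres-operator` namespace):
-
-```shell
-helm upgrade pgo -n postgres-operator helm/install --set disable_check_for_upgrades=true
-```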
-
-## Uninstall
-
-To uninstall PGO, remove all your PostgresCluster objects, then use the `helm uninstall` command:
-
-```shell
-helm uninstall <name> -n <namespace>
-```
-
-Helm [leaves the CRDs][helm-crd-limits] in place. You can remove them with `kubectl delete`:
-
-```shell
-kubectl delete -f helm/install/crds
-```
-
-# The Crunchy Container Registry
-
-## Installing directly from the registry
-
-Crunchy Data hosts an OCI registry that `helm` can use directly. (Not all `helm` commands support OCI registries. For more information on which commands can be used, see [the Helm documentation](https://helm.sh/docs/topics/registries/).)
-
-You can install PGO directly from the registry using the `helm install` command:
-
-```
-helm install pgo {{< param operatorHelmRepository >}}
-```
-
-Or to see what values are set in the default `values.yaml` before installing, you could run a `helm show` command just as you would with any other registry:
-
-```
-helm show values {{< param operatorHelmRepository >}}
-```
-
-## Downloading from the registry
-
-Rather than deploying directly from the Crunchy registry, you can instead use the registry as the source for the Helm chart.
-
-To do so, download the Helm chart from the Crunchy Container Registry:
-
-```
-# To pull down the most recent Helm chart
-helm pull {{< param operatorHelmRepository >}}
-
-# To pull down a specific Helm chart
-helm pull {{< param operatorHelmRepository >}} --version {{< param operatorVersion >}}
-```
-
-Once the Helm chart has been downloaded, uncompress the bundle:
-
-```
-tar -xvf pgo-{{< param operatorVersion >}}.tgz
-```
-
-And from there, you can follow the instructions above on setting the [Configuration](#configuration) and installing a local Helm chart.
diff --git a/docs/content/installation/kustomize.md b/docs/content/installation/kustomize.md
deleted file mode 100644
index 7c601e3060..0000000000
--- a/docs/content/installation/kustomize.md
+++ /dev/null
@@ -1,161 +0,0 @@
----
-title: "Kustomize"
-date:
-draft: false
-weight: 10
----
-
-# Installing PGO Using Kustomize
-
-This section provides instructions for installing and configuring PGO using Kustomize.
-
-If you are deploying using the installer from the [Crunchy Data Customer Portal](https://access.crunchydata.com/), please refer to the guide there for alternative setup information.
-
-## Prerequisites
-
-First, go to GitHub and [fork the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository, which contains the PGO Kustomize installer.
-
-[https://github.com/CrunchyData/postgres-operator-examples/fork](https://github.com/CrunchyData/postgres-operator-examples/fork)
-
-Once you have forked this repo, you can download it to your working environment with a command similar to this:
-
-```
-YOUR_GITHUB_UN="<your GitHub username>"
-git clone --depth 1 "git@github.com:${YOUR_GITHUB_UN}/postgres-operator-examples.git"
-cd postgres-operator-examples
-```
-
-The PGO installation project is located in the `kustomize/install` directory.
-
-## Configuration
-
-While the default Kustomize install should work in most Kubernetes environments, it may be necessary to further customize the Kustomize project(s) according to your specific needs.
-
-For instance, to customize the image tags utilized for the PGO Deployment, the `images` setting in the `kustomize/install/default/kustomization.yaml` file can be modified:
-
-```yaml
-images:
-- name: postgres-operator
-  newName: {{< param operatorRepository >}}
-  newTag: {{< param postgresOperatorTag >}}
-```
-
-If you are deploying using the images from the [Crunchy Data Customer Portal](https://access.crunchydata.com/), please refer to the [private registries]({{< relref "guides/private-registries.md" >}}) guide for additional setup information.
-
-Please note that the Kustomize install project will also create a namespace for PGO by default (though it is possible to install without creating the namespace, as shown below). To modify the name of the namespace created by the installer, the `kustomize/install/namespace/namespace.yaml` should be modified:
-
-```yaml
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: custom-namespace
-```
-
-The `namespace` setting in `kustomize/install/default/kustomization.yaml` should be modified accordingly.
-
-```yaml
-namespace: custom-namespace
-```
-
-By default, PGO deploys with debug logging turned on. If you wish to disable this, you need to set the `CRUNCHY_DEBUG` environment variable, found in the `kustomize/install/manager/manager.yaml` file, to `"false"`. Alternatively, you can add the following to your `kustomize/install/manager/kustomization.yaml` to disable debug logging:
-
-```yaml
-patchesStrategicMerge:
-- |-
-  apiVersion: apps/v1
-  kind: Deployment
-  metadata:
-    name: pgo
-  spec:
-    template:
-      spec:
-        containers:
-        - name: operator
-          env:
-          - name: CRUNCHY_DEBUG
-            value: "false"
-```
-
-You can also create additional Kustomize overlays to further patch and customize the installation according to your specific needs.
-
-### Installation Mode
-
-When PGO is installed, it can be configured to manage PostgreSQL clusters in all namespaces within the Kubernetes cluster, or just those within a single namespace. When managing PostgreSQL clusters in all namespaces, a ClusterRole and ClusterRoleBinding are created to ensure PGO has the permissions it requires to properly manage PostgreSQL clusters across all namespaces. However, when PGO is configured to manage PostgreSQL clusters within a single namespace only, a Role and RoleBinding are created instead.
-
-The installation of the necessary resources for a cluster-wide or a namespace-limited operator is done automatically by Kustomize, as described below in the Install section. The only potential change you may need to make is to the Namespace resource and the `namespace` field if using a namespace other than the default `postgres-operator`.
-
-## Install
-
-Once the Kustomize project has been modified according to your specific needs, PGO can then be installed using `kubectl` and Kustomize. To create the target namespace, run the following:
-
-```shell
-kubectl apply -k kustomize/install/namespace
-```
-
-This will create the default `postgres-operator` namespace, unless you have edited the `kustomize/install/namespace/namespace.yaml` resource. That `Namespace` resource should have the same value as the `namespace` field in the `kustomization.yaml` file (located either at `kustomize/install/default` or `kustomize/install/singlenamespace`, depending on whether you are deploying the operator with cluster-wide or namespace-limited permissions).
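-
-Before continuing, you can confirm that the namespace exists (a quick check, assuming the default name):
-
-```shell
-kubectl get namespace postgres-operator
-```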
-
-To install PGO itself in cluster-wide mode, apply the kustomization file in the `default` folder:
-
-```shell
-kubectl apply --server-side -k kustomize/install/default
-```
-
-To install PGO itself in namespace-limited mode, apply the kustomization file in the `singlenamespace` folder:
-
-```shell
-kubectl apply --server-side -k kustomize/install/singlenamespace
-```
-
-The `kustomization.yaml` files in those folders take care of applying the appropriate permissions.
-
-### Automated Upgrade Checks
-
-By default, PGO will automatically check for updates to itself and software components by making a request to a URL. If PGO detects there are updates available, it will print them in the logs. As part of the check, PGO will send aggregated, anonymized information about the current deployment to the endpoint. An upcoming release will allow PGO to opt in to receiving and applying updates to software components automatically.
-
-PGO will check for updates upon startup and once every 24 hours. Any errors in checking will have no impact on PGO's operation. To disable the upgrade check, you can set the `CHECK_FOR_UPGRADES` environment variable on the `pgo` Deployment to `"false"`.
-
-For more information about collected data, see the Crunchy Data [collection notice](https://www.crunchydata.com/developers/data-collection-notice).
-
-## Uninstall
-
-Once PGO has been installed, it can also be uninstalled using `kubectl` and Kustomize. To uninstall PGO (assuming it was installed in cluster-wide mode), the following command can be utilized:
-
-```shell
-kubectl delete -k kustomize/install/default
-```
-
-To uninstall PGO installed with only namespace permissions, use:
-
-```shell
-kubectl delete -k kustomize/install/singlenamespace
-```
-
-The namespace created with this installation can likewise be cleaned up with:
-
-```shell
-kubectl delete -k kustomize/install/namespace
-```
diff --git a/docs/content/installation/monitoring/_index.md b/docs/content/installation/monitoring/_index.md
deleted file mode 100644
index ef3fd62963..0000000000
--- a/docs/content/installation/monitoring/_index.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: "PGO Monitoring"
-date:
-draft: false
-weight: 100
----
-
-The PGO Monitoring stack is a fully integrated solution for monitoring and visualizing metrics captured from PostgreSQL clusters created using PGO. By leveraging [pgMonitor][] to configure and integrate the various tools, components and metrics needed to effectively monitor PostgreSQL clusters, PGO Monitoring provides a powerful and easy-to-use solution to effectively monitor and visualize pertinent PostgreSQL database and container metrics. Included in the monitoring infrastructure are the following components:
-
-- [pgMonitor][] - Provides the configuration needed to enable the effective capture and
-visualization of PostgreSQL database metrics using the various tools comprising the PostgreSQL
-Operator Monitoring infrastructure
-- [Grafana](https://grafana.com/) - Enables visual dashboard capabilities for monitoring
-PostgreSQL clusters, specifically using Crunchy PostgreSQL Exporter data stored within Prometheus
-- [Prometheus](https://prometheus.io/) - A multi-dimensional data model with time series data,
-which is used in collaboration with the Crunchy PostgreSQL Exporter to provide and store
-metrics
-- [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) - Handles alerts
-sent by Prometheus by deduplicating, grouping, and routing them to receiver integrations.
-
-By leveraging the installation method described in this section, PGO Monitoring can be deployed alongside PGO.
-
-
-
-[pgMonitor]: https://github.com/CrunchyData/pgmonitor
diff --git a/docs/content/installation/monitoring/kustomize.md b/docs/content/installation/monitoring/kustomize.md
deleted file mode 100644
index 9d322d55b6..0000000000
--- a/docs/content/installation/monitoring/kustomize.md
+++ /dev/null
@@ -1,98 +0,0 @@
----
-title: "Kustomize"
-date:
-draft: false
-weight: 10
----
-
-# Installing PGO Monitoring Using Kustomize
-
-This section provides instructions for installing and configuring PGO Monitoring using Kustomize.
-
-## Prerequisites
-
-First, go to GitHub and [fork the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository, which contains the PGO Monitoring Kustomize installer.
-
-[https://github.com/CrunchyData/postgres-operator-examples/fork](https://github.com/CrunchyData/postgres-operator-examples/fork)
-
-Once you have forked this repo, you can download it to your working environment with a command similar to this:
-
-```
-YOUR_GITHUB_UN="<your GitHub username>"
-git clone --depth 1 "git@github.com:${YOUR_GITHUB_UN}/postgres-operator-examples.git"
-cd postgres-operator-examples
-```
-
-The PGO Monitoring project is located in the `kustomize/monitoring` directory.
-
-## Configuration
-
-While the default Kustomize install should work in most Kubernetes environments, it may be necessary to further customize the project according to your specific needs.
-
-For instance, by default `fsGroup` is set to `26` for the `securityContext` defined for the various Deployments comprising the PGO Monitoring stack:
-
-```yaml
-securityContext:
-  fsGroup: 26
-```
-
-In most Kubernetes environments this setting is needed to ensure processes within the container have the permissions needed to write to any volumes mounted to each of the Pods comprising the PGO Monitoring stack. However, when installing in an OpenShift environment (and more specifically when using the `restricted` Security Context Constraint), the `fsGroup` setting should be removed since OpenShift will automatically handle setting the proper `fsGroup` within the Pod's `securityContext`.
-
-Additionally, within this same section it may also be necessary to modify the `supplementalGroups` setting according to your specific storage configuration:
-
-```yaml
-securityContext:
-  supplementalGroups: 65534
-```
-
-Therefore, the following files (located under `kustomize/monitoring`) should be modified and/or patched (e.g. using additional overlays) as needed to ensure the `securityContext` is properly defined for your Kubernetes environment:
-
-- `deploy-alertmanager.yaml`
-- `deploy-grafana.yaml`
-- `deploy-prometheus.yaml`
-
-And to modify the configuration for the various storage resources (i.e. PersistentVolumeClaims) created by the PGO Monitoring installer, the `kustomize/monitoring/pvcs.yaml` file can also be modified.
-
-Additionally, it is also possible to further customize the configuration for the various components comprising the PGO Monitoring stack (Grafana, Prometheus and/or Alertmanager) by modifying the following configuration resources:
-
-- `alertmanager-config.yaml`
-- `alertmanager-rules-config.yaml`
-- `grafana-datasources.yaml`
-- `prometheus-config.yaml`
-
-Finally, please note that the default username and password for Grafana can be updated by modifying the Grafana Secret in the file `kustomize/monitoring/grafana-secret.yaml`.
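-
-That Secret is ordinary Kubernetes YAML; a minimal sketch of its shape follows (the metadata name and key names here are illustrative assumptions; keep whatever the file in your fork already uses):
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: grafana-secret   # hypothetical name; match kustomize/monitoring/grafana-secret.yaml
-type: Opaque
-stringData:
-  username: admin        # assumption: replace with your own credentials
-  password: change-me
-```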
-
-## Install
-
-Once the Kustomize project has been modified according to your specific needs, PGO Monitoring can then be installed using `kubectl` and Kustomize:
-
-```shell
-kubectl apply -k kustomize/monitoring
-```
-
-## Uninstall
-
-And similarly, once PGO Monitoring has been installed, it can be uninstalled using `kubectl` and Kustomize:
-
-```shell
-kubectl delete -k kustomize/monitoring
-```
diff --git a/docs/content/quickstart/_index.md b/docs/content/quickstart/_index.md
deleted file mode 100644
index 089070eb5c..0000000000
--- a/docs/content/quickstart/_index.md
+++ /dev/null
@@ -1,206 +0,0 @@
----
-title: "Quickstart"
-date:
-draft: false
-weight: 10
----
-
-Can't wait to try out [PGO](https://github.com/CrunchyData/postgres-operator), the [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com)? Let us show you the quickest possible path to getting up and running.
-
-## Prerequisites
-
-Please be sure you have the following utilities installed on your host machine:
-
-- `kubectl`
-- `git`
-
-## Installation
-
-### Step 1: Download the Examples
-
-First, go to GitHub and [fork the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository:
-
-[https://github.com/CrunchyData/postgres-operator-examples/fork](https://github.com/CrunchyData/postgres-operator-examples/fork)
-
-Once you have forked this repo, you can download it to your working environment with a command similar to this:
-
-```
-YOUR_GITHUB_UN="<your GitHub username>"
-git clone --depth 1 "git@github.com:${YOUR_GITHUB_UN}/postgres-operator-examples.git"
-cd postgres-operator-examples
-```
-
-### Step 2: Install PGO, the Postgres Operator
-
-You can install PGO, the Postgres Operator from Crunchy Data, using the command below:
-
-```
-kubectl apply -k kustomize/install/namespace
-kubectl apply --server-side -k kustomize/install/default
-```
-
-This will create a namespace called `postgres-operator` and create all of the objects required to deploy PGO.
-
-To check on the status of your installation, you can run the following command:
-
-```
-kubectl -n postgres-operator get pods \
-  --selector=postgres-operator.crunchydata.com/control-plane=postgres-operator \
-  --field-selector=status.phase=Running
-```
-
-If the PGO Pod is healthy, you should see output similar to:
-
-```
-NAME                                READY   STATUS    RESTARTS   AGE
-postgres-operator-9dd545d64-t4h8d   1/1     Running   0          3s
-```
-
-## Create a Postgres Cluster
-
-Let's create a simple Postgres cluster. You can do this by executing the following command:
-
-```
-kubectl apply -k kustomize/postgres
-```
-
-This will create a Postgres cluster named `hippo` in the `postgres-operator` namespace. You can track the progress of your cluster using the following command:
-
-```
-kubectl -n postgres-operator describe postgresclusters.postgres-operator.crunchydata.com hippo
-```
-
-## Connect to the Postgres cluster
-
-As part of creating a Postgres cluster, the Postgres Operator creates a PostgreSQL user account. The credentials for this account are stored in a Secret that has the name `<clusterName>-pguser-<userName>`.
-
-Within this Secret are attributes that provide information to let you log into the PostgreSQL cluster. These include:
-
-- `user`: The name of the user account.
-- `password`: The password for the user account.
-- `dbname`: The name of the database that the user has access to by default.
-- `host`: The name of the host of the database.
- This references the [Service](https://kubernetes.io/docs/concepts/services-networking/service/) of the primary Postgres instance. -- `port`: The port that the database is listening on. -- `uri`: A [PostgreSQL connection URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) - that provides all the information for logging into the Postgres database. -- `jdbc-uri`: A [PostgreSQL JDBC connection URI](https://jdbc.postgresql.org/documentation/use/) - that provides all the information for logging into the Postgres database via the JDBC driver. - -If you deploy your Postgres cluster with the [PgBouncer](https://www.pgbouncer.org/) connection pooler, there are additional values that are populated in the user Secret, including: - -- `pgbouncer-host`: The name of the host of the PgBouncer connection pooler. - This references the [Service](https://kubernetes.io/docs/concepts/services-networking/service/) of the PgBouncer connection pooler. -- `pgbouncer-port`: The port that the PgBouncer connection pooler is listening on. -- `pgbouncer-uri`: A [PostgreSQL connection URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) - that provides all the information for logging into the Postgres database via the PgBouncer connection pooler. -- `pgbouncer-jdbc-uri`: A [PostgreSQL JDBC connection URI](https://jdbc.postgresql.org/documentation/use/) - that provides all the information for logging into the Postgres database via the PgBouncer connection pooler using the JDBC driver. - -Note that **all connections use TLS**. PGO sets up a PKI for your Postgres clusters. You can also choose to bring your own PKI / certificate authority; this is covered later in the documentation. - -### Connect via `psql` in the Terminal - -#### Connect Directly - -If you are on the same network as your PostgreSQL cluster, you can connect directly to it using the following command: - -``` -psql $(kubectl -n postgres-operator get secrets hippo-pguser-hippo -o go-template='{{.data.uri | base64decode}}') -``` - -#### Connect Using a Port-Forward - -In a new terminal, create a port forward: - -``` -PG_CLUSTER_PRIMARY_POD=$(kubectl get pod -n postgres-operator -o name \ - -l postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/role=master) -kubectl -n postgres-operator port-forward "${PG_CLUSTER_PRIMARY_POD}" 5432:5432 -``` - -Establish a connection to the PostgreSQL cluster. - -``` -PG_CLUSTER_USER_SECRET_NAME=hippo-pguser-hippo - -PGPASSWORD=$(kubectl get secrets -n postgres-operator "${PG_CLUSTER_USER_SECRET_NAME}" -o go-template='{{.data.password | base64decode}}') \ -PGUSER=$(kubectl get secrets -n postgres-operator "${PG_CLUSTER_USER_SECRET_NAME}" -o go-template='{{.data.user | base64decode}}') \ -PGDATABASE=$(kubectl get secrets -n postgres-operator "${PG_CLUSTER_USER_SECRET_NAME}" -o go-template='{{.data.dbname | base64decode}}') \ -psql -h localhost -``` - -### Connect an Application - -The information provided in the user Secret will allow you to connect an application directly to your PostgreSQL database. - -For example, let's connect [Keycloak](https://www.keycloak.org/). Keycloak is a popular open source identity management tool that is backed by a PostgreSQL database. 
Using the `hippo` cluster we created, we can deploy the following manifest file:

```
cat <<EOF >> keycloak.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: keycloak
  namespace: postgres-operator
  labels:
    app.kubernetes.io/name: keycloak
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: keycloak
  template:
    metadata:
      labels:
        app.kubernetes.io/name: keycloak
    spec:
      containers:
      - image: quay.io/keycloak/keycloak:latest
        name: keycloak
        env:
        - name: DB_VENDOR
          value: "postgres"
        - name: DB_ADDR
          valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: host } }
        - name: DB_PORT
          valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: port } }
        - name: DB_DATABASE
          valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: dbname } }
        - name: DB_USER
          valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: user } }
        - name: DB_PASSWORD
          valueFrom: { secretKeyRef: { name: hippo-pguser-hippo, key: password } }
        - name: KEYCLOAK_USER
          value: "admin"
        - name: KEYCLOAK_PASSWORD
          value: "admin"
        - name: PROXY_ADDRESS_FORWARDING
          value: "true"
        ports:
        - name: http
          containerPort: 8080
        - name: https
          containerPort: 8443
        readinessProbe:
          httpGet:
            path: /auth/realms/master
            port: 8080
      restartPolicy: Always
EOF

kubectl apply -f keycloak.yaml
```

There is a full example for how to deploy Keycloak with the Postgres Operator in the `kustomize/keycloak` folder.

## Next Steps

Congratulations, you've got your Postgres cluster up and running, perhaps with an application connected to it! 👏 👏 👏

You can find out more about the [`postgresclusters` custom resource definition]({{< relref "references/crd.md" >}}) through the [documentation]({{< relref "references/crd.md" >}}) and through `kubectl explain`, i.e.:

```
kubectl explain postgresclusters
```

Let's work through a tutorial together to better understand the various components of PGO, the Postgres Operator, and how you can fine-tune your settings to tailor your Postgres cluster to your application.
diff --git a/docs/content/references/.gitattributes b/docs/content/references/.gitattributes
deleted file mode 100644
index 230f0d5267..0000000000
--- a/docs/content/references/.gitattributes
+++ /dev/null
@@ -1,3 +0,0 @@
# https://docs.github.com/en/repositories/working-with-files/managing-files/customizing-how-changed-files-appear-on-github
# https://github.com/github/linguist/blob/v7.16.0/docs/overrides.md
/crd.md linguist-generated
diff --git a/docs/content/references/_index.md b/docs/content/references/_index.md
deleted file mode 100644
index f5b4f37f0b..0000000000
--- a/docs/content/references/_index.md
+++ /dev/null
@@ -1,6 +0,0 @@
---
title: "References"
date:
draft: false
weight: 100
---
diff --git a/docs/content/references/components.md b/docs/content/references/components.md
deleted file mode 100644
index a8ca095edb..0000000000
--- a/docs/content/references/components.md
+++ /dev/null
@@ -1,167 +0,0 @@
---
title: "Components and Compatibility"
date:
draft: false
weight: 110
---

## Kubernetes Compatibility

PGO, the Postgres Operator from Crunchy Data, is tested on the following platforms:

- Kubernetes 1.22-1.25
- OpenShift 4.8-4.11
- Rancher
- Google Kubernetes Engine (GKE), including Anthos
- Amazon EKS
- Microsoft AKS
- VMware Tanzu

## Components Compatibility

The following table defines the compatibility between PGO and the various component containers needed to deploy PostgreSQL clusters using PGO.
The listed versions of Postgres show the latest minor release (e.g. {{< param postgresVersion13 >}}) of each major version (e.g. {{< param postgresVersion >}}). Older minor releases may still be compatible with PGO. We generally recommend running the latest minor release for the [same reasons that the PostgreSQL community provides](https://www.postgresql.org/support/versioning/).

Note that for the 5.0.3 release and beyond, the Postgres containers were renamed to `crunchy-postgres` and `crunchy-postgres-gis`.

| PGO | pgAdmin* | pgBackRest | PgBouncer | Postgres | PostGIS |
|-----|----------|------------|-----------|----------|---------|
| `5.3.0` | `4.30` | `2.41` | `1.17` | `15,14,13,12,11` | `3.3,3.2,3.1,3.0,2.5,2.4` |
| `5.2.1` | `4.30` | `2.41` | `1.17` | `14,13,12,11,10` | `3.2,3.1,3.0,2.5,2.4,2.3` |
| `5.2.0` | `4.30` | `2.40` | `1.17` | `14,13,12,11,10` | `3.2,3.1,3.0,2.5,2.4,2.3` |
| `5.1.4` | `4.30` | `2.41` | `1.17` | `14,13,12,11,10` | `3.2,3.1,3.0,2.5,2.4,2.3` |
| `5.1.3` | `4.30` | `2.40` | `1.17` | `14,13,12,11,10` | `3.2,3.1,3.0,2.5,2.4,2.3` |
| `5.1.2` | `4.30` | `2.38` | `1.16` | `14,13,12,11,10` | `3.2,3.1,3.0,2.5,2.4,2.3` |
| `5.1.1` | `4.30` | `2.38` | `1.16` | `14,13,12,11,10` | `3.2,3.1,3.0,2.5,2.4,2.3` |
| `5.1.0` | `4.30` | `2.38` | `1.16` | `14,13,12,11,10` | `3.1,3.0,2.5,2.4,2.3` |
| `5.0.9` | `n/a` | `2.41` | `1.17` | `14,13,12,11,10` | `3.1,3.0,2.5,2.4,2.3` |
| `5.0.8` | `n/a` | `2.40` | `1.17` | `14,13,12,11,10` | `3.1,3.0,2.5,2.4,2.3` |
| `5.0.7` | `n/a` | `2.38` | `1.16` | `14,13,12,11,10` | `3.2,3.1,3.0,2.5,2.4,2.3` |
| `5.0.6` | `n/a` | `2.38` | `1.16` | `14,13,12,11,10` | `3.2,3.1,3.0,2.5,2.4,2.3` |
| `5.0.5` | `n/a` | `2.36` | `1.16` | `14,13,12,11,10` | `3.1,3.0,2.5,2.4,2.3` |
| `5.0.4` | `n/a` | `2.36` | `1.16` | `14,13,12,11,10` | `3.1,3.0,2.5,2.4,2.3` |
| `5.0.3` | `n/a` | `2.35` | `1.15` | `14,13,12,11,10` | `3.1,3.0,2.5,2.4,2.3` |

_*pgAdmin 4.30 does not currently support Postgres 15._

The latest Postgres containers include Patroni 2.1.3.

The following are the Postgres containers available for version 5.0.2 of PGO and older:

| Component | Version | PGO Version Min. | PGO Version Max. |
|-----------|---------|------------------|------------------|
| `crunchy-postgres-ha` | 13.4 | 5.0.0 | 5.0.2 |
| `crunchy-postgres-ha` | 12.8 | 5.0.0 | 5.0.2 |
| `crunchy-postgres-ha` | 11.13 | 5.0.0 | 5.0.2 |
| `crunchy-postgres-ha` | 10.18 | 5.0.0 | 5.0.2 |
| `crunchy-postgres-gis-ha` | 13.4-3.1 | 5.0.0 | 5.0.2 |
| `crunchy-postgres-gis-ha` | 13.4-3.0 | 5.0.0 | 5.0.2 |
| `crunchy-postgres-gis-ha` | 12.8-3.0 | 5.0.0 | 5.0.2 |
| `crunchy-postgres-gis-ha` | 12.8-2.5 | 5.0.0 | 5.0.2 |
| `crunchy-postgres-gis-ha` | 11.13-2.5 | 5.0.0 | 5.0.2 |
| `crunchy-postgres-gis-ha` | 11.13-2.4 | 5.0.0 | 5.0.2 |
| `crunchy-postgres-gis-ha` | 10.18-2.4 | 5.0.0 | 5.0.2 |
| `crunchy-postgres-gis-ha` | 10.18-2.3 | 5.0.0 | 5.0.2 |

### Container Tags

The container tags follow one of two patterns:

- `<base image>-<software version>-<build version>`
- `<base image>-<software version>-<crunchy version>-<build version>` (Customer Portal only)

For example, when pulling from the [customer portal](https://access.crunchydata.com/), the following would both be valid tags to reference the PgBouncer container:

- `{{< param PGBouncerComponentTagUbi8 >}}`
- `{{< param PGBouncerTagUbi8 >}}`

On the [developer portal](https://www.crunchydata.com/developers/download-postgres/containers), PgBouncer would use this tag:

- `{{< param PGBouncerComponentTagUbi8 >}}`

PostGIS enabled containers have both the Postgres and PostGIS software versions included. For example, Postgres 14 with PostGIS 3.2 would use the following tags:

- `{{< param postgres14GIS32ComponentTagUbi8 >}}`
- `{{< param postgres14GIS32TagUbi8 >}}`

## Extensions Compatibility

The following table defines the compatibility between Postgres extensions and the versions of Postgres they are available in. The "Postgres version" corresponds with the major version of a Postgres container.

The table also lists the initial PGO version that the version of the extension is available in.
| Extension | Version | Postgres Versions | Initial PGO Version |
|-----------|---------|-------------------|---------------------|
| `orafce` | 3.25.1 | 15, 14, 13, 12, 11 | 5.3.0 |
| `orafce` | 3.25.1 | 14, 13, 12, 11, 10 | 5.2.1 |
| `orafce` | 3.24.0 | 14, 13, 12, 11, 10 | 5.1.3 |
| `orafce` | 3.22.0 | 14, 13, 12, 11, 10 | 5.0.8 |
| `pgAudit` | 1.7.0 | 15 | 5.3.0 |
| `pgAudit` | 1.6.2 | 14 | 5.1.0 |
| `pgAudit` | 1.6.2 | 14 | 5.0.6 |
| `pgAudit` | 1.6.1 | 14 | 5.0.4 |
| `pgAudit` | 1.6.0 | 14 | 5.0.3 |
| `pgAudit` | 1.5.2 | 13 | 5.1.0 |
| `pgAudit` | 1.5.2 | 13 | 5.0.6 |
| `pgAudit` | 1.5.0 | 13 | 5.0.0 |
| `pgAudit` | 1.4.3 | 12 | 5.1.0 |
| `pgAudit` | 1.4.1 | 12 | 5.0.0 |
| `pgAudit` | 1.3.4 | 11 | 5.1.0 |
| `pgAudit` | 1.3.4 | 11 | 5.0.6 |
| `pgAudit` | 1.3.2 | 11 | 5.0.0 |
| `pgAudit` | 1.2.4 | 10 | 5.1.0 |
| `pgAudit` | 1.2.4 | 10 | 5.0.6 |
| `pgAudit` | 1.2.2 | 10 | 5.0.0 |
| `pgAudit Analyze` | 1.0.8 | 14, 13, 12, 11, 10 | 5.0.3 |
| `pgAudit Analyze` | 1.0.7 | 13, 12, 11, 10 | 5.0.0 |
| `pg_cron` | 1.4.2 | 15, 14, 13 | 5.3.0 |
| `pg_cron` | 1.4.2 | 14, 13 | 5.2.1 |
| `pg_cron` | 1.4.1 | 14, 13, 12, 11, 10 | 5.0.5 |
| `pg_cron` | 1.3.1 | 14, 13, 12, 11, 10 | 5.0.0 |
| `pg_partman` | 4.7.1 | 15, 14, 13, 12, 11 | 5.3.0 |
| `pg_partman` | 4.6.2 | 14, 13, 12, 11, 10 | 5.2.0 |
| `pg_partman` | 4.6.2 | 14, 13, 12, 11, 10 | 5.1.3 |
| `pg_partman` | 4.6.2 | 14, 13, 12, 11, 10 | 5.0.8 |
| `pg_partman` | 4.6.1 | 14, 13, 12, 11, 10 | 5.1.1 |
| `pg_partman` | 4.6.1 | 14, 13, 12, 11, 10 | 5.0.6 |
| `pg_partman` | 4.6.0 | 14, 13, 12, 11, 10 | 5.0.4 |
| `pg_partman` | 4.5.1 | 13, 12, 11, 10 | 5.0.0 |
| `pgnodemx` | 1.3.0 | 14, 13, 12, 11, 10 | 5.1.0 |
| `pgnodemx` | 1.3.0 | 14, 13, 12, 11, 10 | 5.0.6 |
| `pgnodemx` | 1.2.0 | 14, 13, 12, 11, 10 | 5.0.4 |
| `pgnodemx` | 1.0.5 | 14, 13, 12, 11, 10 | 5.0.3 |
| `pgnodemx` | 1.0.4 | 13, 12, 11, 10 | 5.0.0 |
| `set_user` | 3.0.0 | 14, 13, 12, 11, 10 | 5.0.3 |
| `set_user` | 2.0.1 | 13, 12, 11, 10 | 5.0.2 |
| `set_user` | 2.0.0 | 13, 12, 11, 10 | 5.0.0 |
| `TimescaleDB` | 2.8.1 | 14, 13, 12 | 5.3.0 |
| `TimescaleDB` | 2.6.1 | 14, 13, 12 | 5.1.1 |
| `TimescaleDB` | 2.6.1 | 14, 13, 12 | 5.0.6 |
| `TimescaleDB` | 2.6.0 | 14, 13, 12 | 5.1.0 |
| `TimescaleDB` | 2.5.0 | 14, 13, 12 | 5.0.3 |
| `TimescaleDB` | 2.4.2 | 13, 12 | 5.0.3 |
| `TimescaleDB` | 2.4.0 | 13, 12 | 5.0.2 |
| `TimescaleDB` | 2.3.1 | 11 | 5.0.1 |
| `TimescaleDB` | 2.2.0 | 13, 12, 11 | 5.0.0 |
| `wal2json` | 2.4 | 14, 13, 12, 11, 10 | 5.0.3 |
| `wal2json` | 2.3 | 13, 12, 11, 10 | 5.0.0 |

### Geospatial Extensions

The following extensions are available in the geospatially aware containers (`crunchy-postgres-gis`):

| Extension | Version | Postgres Versions | Initial PGO Version |
|-----------|---------|-------------------|---------------------|
| `PostGIS` | 3.2 | 14 | 5.1.1 |
| `PostGIS` | 3.2 | 14 | 5.0.6 |
| `PostGIS` | 3.1 | 14, 13 | 5.0.0 |
| `PostGIS` | 3.0 | 13, 12 | 5.0.0 |
| `PostGIS` | 2.5 | 12, 11 | 5.0.0 |
| `PostGIS` | 2.4 | 11, 10 | 5.0.0 |
| `PostGIS` | 2.3 | 10 | 5.0.0 |
| `pgrouting` | 3.1.4 | 14 | 5.0.4 |
| `pgrouting` | 3.1.3 | 13 | 5.0.0 |
| `pgrouting` | 3.0.5 | 13, 12 | 5.0.0 |
| `pgrouting` | 2.6.3 | 12, 11, 10 | 5.0.0 |
diff --git a/docs/content/references/crd.md b/docs/content/references/crd.md
deleted file mode 100644
index 25eceb069e..0000000000
--- a/docs/content/references/crd.md
+++ /dev/null
@@ -1,25909 +0,0 @@
---
title: CRD Reference
draft: false
weight: 100
---

Packages:
- [postgres-operator.crunchydata.com/v1beta1](#postgres-operatorcrunchydatacomv1beta1)

## postgres-operator.crunchydata.com/v1beta1

Resource Types:

- [PGAdmin](#pgadmin)
- [PGUpgrade](#pgupgrade)
- [PostgresCluster](#postgrescluster)

### PGAdmin

PGAdmin is the Schema for the pgadmins API

| Name | Type | Description | Required |
|------|------|-------------|----------|
| apiVersion | string | postgres-operator.crunchydata.com/v1beta1 | true |
| kind | string | PGAdmin | true |
| metadata | object | Refer to the Kubernetes API documentation for the fields of the `metadata` field. | true |
| spec | object | PGAdminSpec defines the desired state of PGAdmin | false |
| status | object | PGAdminStatus defines the observed state of PGAdmin | false |
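To make the shape of this resource concrete, here is a minimal sketch of a PGAdmin manifest built only from the fields documented below; the metadata name and storage size are hypothetical placeholders, not values taken from this reference:

```
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PGAdmin
metadata:
  name: rhino  # hypothetical name
  namespace: postgres-operator
spec:
  # dataVolumeClaimSpec is the only field marked required in the PGAdmin.spec table
  dataVolumeClaimSpec:
    accessModes:
    - ReadWriteOnce
    resources:
      requests:
        storage: 1Gi
```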

#### PGAdmin.spec

PGAdminSpec defines the desired state of PGAdmin

| Name | Type | Description | Required |
|------|------|-------------|----------|
| dataVolumeClaimSpec | object | Defines a PersistentVolumeClaim for pgAdmin data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes | true |
| affinity | object | Scheduling constraints of the PGAdmin pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| config | object | Configuration settings for the pgAdmin process. Changes to any of these values will be loaded without validation. Be careful, as you may put pgAdmin into an unusable state. | false |
| image | string | The image name to use for pgAdmin instance. | false |
| imagePullPolicy | enum | ImagePullPolicy is used to determine when Kubernetes will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy | false |
| imagePullSecrets | []object | The image pull secrets used to pull from a private registry. Changing this value causes all running PGAdmin pods to restart. https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | false |
| metadata | object | Metadata contains metadata for custom resources | false |
| priorityClassName | string | Priority class name for the PGAdmin pod. Changing this value causes PGAdmin pod to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ | false |
| resources | object | Resource requirements for the PGAdmin container. | false |
| serverGroups | []object | ServerGroups for importing PostgresClusters to pgAdmin. To create a pgAdmin with no selectors, leave this field empty. A pgAdmin created with no `ServerGroups` will not automatically add any servers through discovery. PostgresClusters can still be added manually. | false |
| tolerations | []object | Tolerations of the PGAdmin pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |

#### PGAdmin.spec.dataVolumeClaimSpec

Defines a PersistentVolumeClaim for pgAdmin data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes

| Name | Type | Description | Required |
|------|------|-------------|----------|
| accessModes | []string | accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | false |
| dataSource | object | dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. | false |
| dataSourceRef | object | dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. | false |
| resources | object | resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources | false |
| selector | object | selector is a label query over volumes to consider for binding. | false |
| storageClassName | string | storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 | false |
| volumeMode | string | volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. | false |
| volumeName | string | volumeName is the binding reference to the PersistentVolume backing this claim. | false |
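Because this is the standard Kubernetes PersistentVolumeClaim spec, the usual core/v1 patterns apply. A sketch that pins the claim to a particular StorageClass (the class name is a hypothetical placeholder):

```
spec:
  dataVolumeClaimSpec:
    accessModes:
    - ReadWriteOnce
    storageClassName: fast-ssd  # hypothetical StorageClass
    volumeMode: Filesystem      # implied when omitted
    resources:
      requests:
        storage: 2Gi
```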

#### PGAdmin.spec.dataVolumeClaimSpec.dataSource

dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |

#### PGAdmin.spec.dataVolumeClaimSpec.dataSourceRef

dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |
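As a sketch of how these reference fields compose with the rest of the claim, pre-populating the pgAdmin volume from an existing VolumeSnapshot might look like this; the snapshot name is hypothetical, and the AnyVolumeDataSource feature-gate caveats above apply:

```
spec:
  dataVolumeClaimSpec:
    accessModes:
    - ReadWriteOnce
    resources:
      requests:
        storage: 1Gi
    dataSourceRef:
      apiGroup: snapshot.storage.k8s.io
      kind: VolumeSnapshot
      name: pgadmin-data-snap  # hypothetical snapshot name
```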

#### PGAdmin.spec.dataVolumeClaimSpec.resources

resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources

| Name | Type | Description | Required |
|------|------|-------------|----------|
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

#### PGAdmin.spec.dataVolumeClaimSpec.selector

selector is a label query over volumes to consider for binding.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PGAdmin.spec.dataVolumeClaimSpec.selector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
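Tying the selector fields together, a claim that only considers volumes carrying particular labels could look like this sketch (the label keys and values are hypothetical):

```
spec:
  dataVolumeClaimSpec:
    accessModes:
    - ReadWriteOnce
    resources:
      requests:
        storage: 1Gi
    selector:
      matchLabels:
        tier: pgadmin          # hypothetical label
      matchExpressions:
      - key: environment       # hypothetical label key
        operator: In
        values:
        - production
        - staging
```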

#### PGAdmin.spec.affinity

Scheduling constraints of the PGAdmin pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node

| Name | Type | Description | Required |
|------|------|-------------|----------|
| nodeAffinity | object | Describes node affinity scheduling rules for the pod. | false |
| podAffinity | object | Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). | false |
| podAntiAffinity | object | Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). | false |

#### PGAdmin.spec.affinity.nodeAffinity

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |

#### PGAdmin.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

#### PGAdmin.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

#### PGAdmin.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

#### PGAdmin.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

#### PGAdmin.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

#### PGAdmin.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index]

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

#### PGAdmin.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

#### PGAdmin.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
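The node affinity fields above mirror core/v1 exactly, so hard requirements and soft preferences combine in the usual way. A sketch with hypothetical node labels and zone values:

```
spec:
  affinity:
    nodeAffinity:
      # hard requirement: only schedule onto amd64 nodes
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: kubernetes.io/arch
            operator: In
            values:
            - amd64
      # soft preference: favor a particular zone (value is hypothetical)
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 50
        preference:
          matchExpressions:
          - key: topology.kubernetes.io/zone
            operator: In
            values:
            - us-east-1a
```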

#### PGAdmin.spec.affinity.podAffinity

Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

#### PGAdmin.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

| Name | Type | Description | Required |
|------|------|-------------|----------|
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

#### PGAdmin.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

#### PGAdmin.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PGAdmin.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PGAdmin.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PGAdmin.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PGAdmin.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running

| Name | Type | Description | Required |
|------|------|-------------|----------|
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

#### PGAdmin.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PGAdmin.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PGAdmin.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PGAdmin.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PGAdmin.spec.affinity.podAntiAffinity

Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |
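The pod affinity and anti-affinity terms are likewise stock core/v1 structures. For instance, a soft rule that spreads the pgAdmin pod away from pods of some other workload might look like this sketch; the label key and value are hypothetical:

```
spec:
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          # avoid nodes already running pods with this (hypothetical) label
          topologyKey: kubernetes.io/hostname
          labelSelector:
            matchLabels:
              app.kubernetes.io/name: example-app
```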

#### PGAdmin.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

| Name | Type | Description | Required |
|------|------|-------------|----------|
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

#### PGAdmin.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

#### PGAdmin.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PGAdmin.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PGAdmin.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PGAdmin.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PGAdmin.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running

| Name | Type | Description | Required |
|------|------|-------------|----------|
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

#### PGAdmin.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PGAdmin.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PGAdmin.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PGAdmin.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PGAdmin.spec.config

Configuration settings for the pgAdmin process. Changes to any of these values will be loaded without validation. Be careful, as you may put pgAdmin into an unusable state.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| files | []object | Files allows the user to mount projected volumes into the pgAdmin container so that files can be referenced by pgAdmin as needed. | false |
| ldapBindPassword | object | A Secret containing the value for the LDAP_BIND_PASSWORD setting. More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html | false |
| settings | object | Settings for the pgAdmin server process. Keys should be uppercase and values must be constants. More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html | false |
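A sketch of the config block, assuming the warning above is heeded (settings are loaded without validation) and assuming `ldapBindPassword` takes a Secret key reference; the selector shape (name/key), the Secret name, and the key are assumptions here, not confirmed by this reference:

```
spec:
  config:
    settings:
      SHOW_GRAVATAR_IMAGE: false  # keys uppercase, values constants
    ldapBindPassword:
      name: pgadmin-ldap          # hypothetical Secret
      key: bind-password          # hypothetical key
```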

#### PGAdmin.spec.config.files[index]

Projection that may be projected along with other supported volume types

| Name | Type | Description | Required |
|------|------|-------------|----------|
| configMap | object | configMap information about the configMap data to project | false |
| downwardAPI | object | downwardAPI information about the downwardAPI data to project | false |
| secret | object | secret information about the secret data to project | false |
| serviceAccountToken | object | serviceAccountToken is information about the serviceAccountToken data to project | false |
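Each `files` entry is a standard projected-volume source, so ConfigMaps and Secrets can be mounted side by side for pgAdmin to reference. A sketch with hypothetical object names:

```
spec:
  config:
    files:
    - configMap:
        name: pgadmin-extra-config  # hypothetical ConfigMap
        items:
        - key: config_local.py
          path: config_local.py
    - secret:
        name: pgadmin-certs         # hypothetical Secret
```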

### PGAdmin.spec.config.files[index].configMap

configMap information about the configMap data to project

| Name | Type | Description | Required |
|------|------|-------------|----------|
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional specify whether the ConfigMap or its keys must be defined | false |

### PGAdmin.spec.config.files[index].configMap.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PGAdmin.spec.config.files[index].downwardAPI

downwardAPI information about the downwardAPI data to project

| Name | Type | Description | Required |
|------|------|-------------|----------|
| items | []object | Items is a list of DownwardAPIVolume files | false |

### PGAdmin.spec.config.files[index].downwardAPI.items[index]

DownwardAPIVolumeFile represents information to create the file containing the pod field

| Name | Type | Description | Required |
|------|------|-------------|----------|
| path | string | Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' | true |
| fieldRef | object | Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. | false |
| mode | integer | Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| resourceFieldRef | object | Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. | false |

### PGAdmin.spec.config.files[index].downwardAPI.items[index].fieldRef

Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| fieldPath | string | Path of the field to select in the specified API version. | true |
| apiVersion | string | Version of the schema the FieldPath is written in terms of, defaults to "v1". | false |

### PGAdmin.spec.config.files[index].downwardAPI.items[index].resourceFieldRef

Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| resource | string | Required: resource to select | true |
| containerName | string | Container name: required for volumes, optional for env vars | false |
| divisor | int or string | Specifies the output format of the exposed resources, defaults to "1" | false |
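A minimal sketch of a downwardAPI projection under `spec.config.files`; the container name `pgadmin` is an assumption, and resourceFieldRef requires a containerName when used in a volume:

```yaml
spec:
  config:
    files:
      - downwardAPI:
          items:
            - path: labels                  # file created inside the projected volume
              fieldRef:
                fieldPath: metadata.labels  # only annotations, labels, name, namespace
            - path: cpu-limit
              resourceFieldRef:
                containerName: pgadmin      # assumed container name; required for volumes
                resource: limits.cpu
                divisor: 1m                 # report the value in millicores
```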

### PGAdmin.spec.config.files[index].secret

secret information about the secret data to project

| Name | Type | Description | Required |
|------|------|-------------|----------|
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional field specify whether the Secret or its key must be defined | false |

### PGAdmin.spec.config.files[index].secret.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PGAdmin.spec.config.files[index].serviceAccountToken

serviceAccountToken is information about the serviceAccountToken data to project

| Name | Type | Description | Required |
|------|------|-------------|----------|
| path | string | path is the path relative to the mount point of the file to project the token into. | true |
| audience | string | audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | false |
| expirationSeconds | integer | expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes. | false |
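For example, a serviceAccountToken projection might look like this sketch; the audience value is an assumption:

```yaml
spec:
  config:
    files:
      - serviceAccountToken:
          path: token                    # file the token is projected into
          audience: https://example.com  # assumed audience; defaults to the apiserver
          expirationSeconds: 3600        # defaults to 1 hour; minimum is 600
```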

### PGAdmin.spec.config.ldapBindPassword

A Secret containing the value for the LDAP_BIND_PASSWORD setting. More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The key of the secret to select from. Must be a valid secret key. | true |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the Secret or its key must be defined | false |
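A sketch of wiring LDAP_BIND_PASSWORD to a Secret; the Secret name and key below are assumptions:

```yaml
spec:
  config:
    ldapBindPassword:
      name: pgadmin-ldap       # hypothetical Secret
      key: ldap-bind-password  # key whose value becomes LDAP_BIND_PASSWORD
      optional: false
```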

### PGAdmin.spec.imagePullSecrets[index]

LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |

### PGAdmin.spec.metadata

Metadata contains metadata for custom resources

| Name | Type | Description | Required |
|------|------|-------------|----------|
| annotations | map[string]string |  | false |
| labels | map[string]string |  | false |

### PGAdmin.spec.resources

Resource requirements for the PGAdmin container.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### PGAdmin.spec.serverGroups[index]

| Name | Type | Description | Required |
|------|------|-------------|----------|
| name | string | The name for the ServerGroup in pgAdmin. Must be unique in the pgAdmin's ServerGroups since it becomes the ServerGroup name in pgAdmin. | true |
| postgresClusterSelector | object | PostgresClusterSelector selects clusters to dynamically add to pgAdmin by matching labels. An empty selector like `{}` will select ALL clusters in the namespace. | true |

### PGAdmin.spec.serverGroups[index].postgresClusterSelector

PostgresClusterSelector selects clusters to dynamically add to pgAdmin by matching labels. An empty selector like `{}` will select ALL clusters in the namespace.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PGAdmin.spec.serverGroups[index].postgresClusterSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
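Putting the selector to work, this sketch registers two ServerGroups: one that matches clusters by a hypothetical label, and one that uses the empty selector to pick up every cluster in the namespace:

```yaml
spec:
  serverGroups:
    - name: demo                   # becomes the ServerGroup name in pgAdmin
      postgresClusterSelector:
        matchLabels:
          owner: demo-team         # hypothetical label on PostgresClusters
    - name: everything
      postgresClusterSelector: {}  # empty selector: ALL clusters in the namespace
```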

### PGAdmin.spec.tolerations[index]

The pod this Toleration is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| effect | string | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | false |
| key | string | Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. | false |
| operator | string | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. | false |
| tolerationSeconds | integer | TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. | false |
| value | string | Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. | false |
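A sketch of a single toleration, assuming nodes tainted with a hypothetical `dedicated=pgadmin:NoSchedule` taint:

```yaml
spec:
  tolerations:
    - key: dedicated       # hypothetical taint key
      operator: Equal      # default operator; Exists would match any value
      value: pgadmin
      effect: NoSchedule
```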

### PGAdmin.status

PGAdminStatus defines the observed state of PGAdmin

| Name | Type | Description | Required |
|------|------|-------------|----------|
| conditions | []object | conditions represent the observations of pgadmin's current state. Known .status.conditions.type are: "PersistentVolumeResizing", "Progressing", "ProxyAvailable" | false |
| observedGeneration | integer | observedGeneration represents the .metadata.generation on which the status was based. | false |

### PGAdmin.status.conditions[index]

Condition contains details for one aspect of the current state of this API Resource.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| lastTransitionTime | string | lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. | true |
| message | string | message is a human readable message indicating details about the transition. This may be an empty string. | true |
| reason | string | reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. | true |
| status | enum | status of the condition, one of True, False, Unknown. | true |
| type | string | type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) | true |
| observedGeneration | integer | observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. | false |

## PGUpgrade

PGUpgrade is the Schema for the pgupgrades API

| Name | Type | Description | Required |
|------|------|-------------|----------|
| apiVersion | string | postgres-operator.crunchydata.com/v1beta1 | true |
| kind | string | PGUpgrade | true |
| metadata | object | Refer to the Kubernetes API documentation for the fields of the `metadata` field. | true |
| spec | object | PGUpgradeSpec defines the desired state of PGUpgrade | false |
| status | object | PGUpgradeStatus defines the observed state of PGUpgrade | false |

### PGUpgrade.spec

PGUpgradeSpec defines the desired state of PGUpgrade

| Name | Type | Description | Required |
|------|------|-------------|----------|
| fromPostgresVersion | integer | The major version of PostgreSQL before the upgrade. | true |
| postgresClusterName | string | The name of the cluster to be updated | true |
| toPostgresVersion | integer | The major version of PostgreSQL to be upgraded to. | true |
| affinity | object | Scheduling constraints of the PGUpgrade pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| image | string | The image name to use for major PostgreSQL upgrades. | false |
| imagePullPolicy | enum | ImagePullPolicy is used to determine when Kubernetes will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy | false |
| imagePullSecrets | []object | The image pull secrets used to pull from a private registry. Changing this value causes all running PGUpgrade pods to restart. https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | false |
| metadata | object | Metadata contains metadata for custom resources | false |
| priorityClassName | string | Priority class name for the PGUpgrade pod. Changing this value causes the PGUpgrade pod to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ | false |
| resources | object | Resource requirements for the PGUpgrade container. | false |
| toPostgresImage | string | The image name to use for PostgreSQL containers after upgrade. When omitted, the value comes from an operator environment variable. | false |
| tolerations | []object | Tolerations of the PGUpgrade pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |
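Only the first three fields are required, so a minimal PGUpgrade sketch looks like the following; the cluster name `hippo` and the version numbers are assumptions:

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PGUpgrade
metadata:
  name: example-upgrade
spec:
  postgresClusterName: hippo  # assumed PostgresCluster to upgrade
  fromPostgresVersion: 15     # major version before the upgrade
  toPostgresVersion: 16       # major version to upgrade to
```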

### PGUpgrade.spec.affinity

Scheduling constraints of the PGUpgrade pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node

| Name | Type | Description | Required |
|------|------|-------------|----------|
| nodeAffinity | object | Describes node affinity scheduling rules for the pod. | false |
| podAffinity | object | Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). | false |
| podAntiAffinity | object | Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). | false |

### PGUpgrade.spec.affinity.nodeAffinity

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |

### PGUpgrade.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

### PGUpgrade.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PGUpgrade.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PGUpgrade.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PGUpgrade.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

### PGUpgrade.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index]

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PGUpgrade.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PGUpgrade.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
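A sketch combining a hard requirement with a soft preference for the PGUpgrade pod; the zone value is an assumption:

```yaml
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:           # terms are ORed
          - matchExpressions:        # requirements within a term are ANDed
              - key: kubernetes.io/arch
                operator: In
                values: [amd64]
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 50                 # in the range 1-100
          preference:
            matchExpressions:
              - key: topology.kubernetes.io/zone
                operator: In
                values: [us-east-1a] # assumed zone label value
```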

### PGUpgrade.spec.affinity.podAffinity

Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PGUpgrade.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

| Name | Type | Description | Required |
|------|------|-------------|----------|
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PGUpgrade.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PGUpgrade.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PGUpgrade.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PGUpgrade.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PGUpgrade.spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PGUpgrade.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `<topologyKey>` matches that of any node on which a pod of the set of pods is running

| Name | Type | Description | Required |
|------|------|-------------|----------|
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PGUpgrade.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PGUpgrade.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PGUpgrade.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PGUpgrade.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PGUpgrade.spec.affinity.podAntiAffinity

Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PGUpgrade.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

| Name | Type | Description | Required |
|------|------|-------------|----------|
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PGUpgrade.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PGUpgrade.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PGUpgrade.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PGUpgrade.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PGUpgrade.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PGUpgrade.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `<topologyKey>` matches that of any node on which a pod of the set of pods is running

| Name | Type | Description | Required |
|------|------|-------------|----------|
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PGUpgrade.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PGUpgrade.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PGUpgrade.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PGUpgrade.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
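As an illustration, this sketch asks the scheduler to keep the PGUpgrade pod off nodes already running pods of a cluster carrying the assumed label `postgres-operator.crunchydata.com/cluster: hippo`:

```yaml
spec:
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            topologyKey: kubernetes.io/hostname  # "co-located" means same node
            labelSelector:
              matchLabels:
                postgres-operator.crunchydata.com/cluster: hippo  # assumed label
```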

### PGUpgrade.spec.imagePullSecrets[index]

LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |

### PGUpgrade.spec.metadata

Metadata contains metadata for custom resources

| Name | Type | Description | Required |
|------|------|-------------|----------|
| annotations | map[string]string |  | false |
| labels | map[string]string |  | false |

### PGUpgrade.spec.resources

Resource requirements for the PGUpgrade container.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### PGUpgrade.spec.tolerations[index]

The pod this Toleration is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| effect | string | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | false |
| key | string | Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. | false |
| operator | string | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. | false |
| tolerationSeconds | integer | TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. | false |
| value | string | Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. | false |

### PGUpgrade.status

PGUpgradeStatus defines the observed state of PGUpgrade

| Name | Type | Description | Required |
|------|------|-------------|----------|
| conditions | []object | conditions represent the observations of PGUpgrade's current state. | false |
| observedGeneration | integer | observedGeneration represents the .metadata.generation on which the status was based. | false |

### PGUpgrade.status.conditions[index]

Condition contains details for one aspect of the current state of this API Resource.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| lastTransitionTime | string | lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. | true |
| message | string | message is a human readable message indicating details about the transition. This may be an empty string. | true |
| reason | string | reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. | true |
| status | enum | status of the condition, one of True, False, Unknown. | true |
| type | string | type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) | true |
| observedGeneration | integer | observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. | false |
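Illustrative only: a PGUpgrade status carrying one such condition might read as follows; every value here, including the reason, is invented:

```yaml
status:
  observedGeneration: 1
  conditions:
    - type: Progressing                  # CamelCase condition type
      status: "False"                    # one of True, False, Unknown
      reason: ClusterNotShutdown         # hypothetical CamelCase reason
      message: PostgresCluster instances are still running
      lastTransitionTime: "2024-01-01T00:00:00Z"
      observedGeneration: 1
```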

## PostgresCluster

PostgresCluster is the Schema for the postgresclusters API

| Name | Type | Description | Required |
|------|------|-------------|----------|
| apiVersion | string | postgres-operator.crunchydata.com/v1beta1 | true |
| kind | string | PostgresCluster | true |
| metadata | object | Refer to the Kubernetes API documentation for the fields of the `metadata` field. | true |
| spec | object | PostgresClusterSpec defines the desired state of PostgresCluster | false |
| status | object | PostgresClusterStatus defines the observed state of PostgresCluster | false |

### PostgresCluster.spec

PostgresClusterSpec defines the desired state of PostgresCluster

| Name | Type | Description | Required |
|------|------|-------------|----------|
| backups | object | PostgreSQL backup configuration | true |
| instances | []object | Specifies one or more sets of PostgreSQL pods that replicate data for this cluster. | true |
| postgresVersion | integer | The major version of PostgreSQL installed in the PostgreSQL image | true |
| config | object |  | false |
| customReplicationTLSSecret | object | The secret containing the replication client certificates and keys for secure connections to the PostgreSQL server. It will need to contain the client TLS certificate, TLS key and the Certificate Authority certificate with the data keys set to tls.crt, tls.key and ca.crt, respectively. NOTE: If CustomReplicationClientTLSSecret is provided, CustomTLSSecret MUST be provided and the ca.crt provided must be the same. | false |
| customTLSSecret | object | The secret containing the Certificates and Keys to encrypt PostgreSQL traffic will need to contain the server TLS certificate, TLS key and the Certificate Authority certificate with the data keys set to tls.crt, tls.key and ca.crt, respectively. It will then be mounted as a volume projection to the '/pgconf/tls' directory. For more information on Kubernetes secret projections, please see https://k8s.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths NOTE: If CustomTLSSecret is provided, CustomReplicationClientTLSSecret MUST be provided and the ca.crt provided must be the same. | false |
| dataSource | object | Specifies a data source for bootstrapping the PostgreSQL cluster. | false |
| databaseInitSQL | object | DatabaseInitSQL defines a ConfigMap containing custom SQL that will be run after the cluster is initialized. This ConfigMap must be in the same namespace as the cluster. | false |
| disableDefaultPodScheduling | boolean | Whether or not the PostgreSQL cluster should use the defined default scheduling constraints. If the field is unset or false, the default scheduling constraints will be used in addition to any custom constraints provided. | false |
| image | string | The image name to use for PostgreSQL containers. When omitted, the value comes from an operator environment variable. For standard PostgreSQL images, the format is RELATED_IMAGE_POSTGRES_{postgresVersion}, e.g. RELATED_IMAGE_POSTGRES_13. For PostGIS enabled PostgreSQL images, the format is RELATED_IMAGE_POSTGRES_{postgresVersion}_GIS_{postGISVersion}, e.g. RELATED_IMAGE_POSTGRES_13_GIS_3.1. | false |
| imagePullPolicy | enum | ImagePullPolicy is used to determine when Kubernetes will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy | false |
| imagePullSecrets | []object | The image pull secrets used to pull from a private registry. Changing this value causes all running pods to restart. https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | false |
| metadata | object | Metadata contains metadata for custom resources | false |
| monitoring | object | The specification of monitoring tools that connect to PostgreSQL | false |
| openshift | boolean | Whether or not the PostgreSQL cluster is being deployed to an OpenShift environment. If the field is unset, the operator will automatically detect the environment. | false |
| patroni | object |  | false |
| paused | boolean | Suspends the rollout and reconciliation of changes made to the PostgresCluster spec. | false |
| port | integer | The port on which PostgreSQL should listen. | false |
| postGISVersion | string | The PostGIS extension version installed in the PostgreSQL image. When image is not set, indicates a PostGIS enabled image will be used. | false |
| proxy | object | The specification of a proxy that connects to PostgreSQL. | false |
| service | object | Specification of the service that exposes the PostgreSQL primary instance. | false |
| shutdown | boolean | Whether or not the PostgreSQL cluster should be stopped. When this is true, workloads are scaled to zero and CronJobs are suspended. Other resources, such as Services and Volumes, remain in place. | false |
| standby | object | Run this cluster as a read-only copy of an existing cluster or archive. | false |
| supplementalGroups | []integer | A list of group IDs applied to the process of a container. These can be useful when accessing shared file systems with constrained permissions. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | false |
| userInterface | object | The specification of a user interface that connects to PostgreSQL. | false |
| users | []object | Users to create inside PostgreSQL and the databases they should access. The default creates one user that can access one database matching the PostgresCluster name. An empty list creates no users. Removing a user from this list does NOT drop the user nor revoke their access. | false |
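Tying the required fields together, here is a minimal PostgresCluster sketch with one instance set and one PVC-backed pgBackRest repository; the names, sizes, and the instance storage layout are assumptions:

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo
spec:
  postgresVersion: 16
  instances:
    - name: instance1
      replicas: 1
      dataVolumeClaimSpec:             # assumed field layout for instance storage
        accessModes: [ReadWriteOnce]
        resources:
          requests:
            storage: 1Gi
  backups:
    pgbackrest:
      repos:
        - name: repo1
          volume:
            volumeClaimSpec:
              accessModes: [ReadWriteOnce]
              resources:
                requests:
                  storage: 1Gi
```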

- PostgresCluster.spec.backups - ↩ Parent -

- - - -PostgreSQL backup configuration - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
pgbackrestobjectpgBackRest archive configurationtrue
- - -

### PostgresCluster.spec.backups.pgbackrest

↩ Parent

pgBackRest archive configuration

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| repos | []object | Defines a pgBackRest repository | true |
| configuration | []object | Projected volumes containing custom pgBackRest configuration. These files are mounted under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the PostgreSQL Operator: https://pgbackrest.org/configuration.html | false |
| global | map[string]string | Global pgBackRest configuration settings. These settings are included in the "global" section of the pgBackRest configuration generated by the PostgreSQL Operator, and then mounted under "/etc/pgbackrest/conf.d": https://pgbackrest.org/configuration.html | false |
| image | string | The image name to use for pgBackRest containers. Utilized to run pgBackRest repository hosts and backups. The image may also be set using the RELATED_IMAGE_PGBACKREST environment variable | false |
| jobs | object | Jobs field allows configuration for all backup jobs | false |
| manual | object | Defines details for manual pgBackRest backup Jobs | false |
| metadata | object | Metadata contains metadata for custom resources | false |
| repoHost | object | Defines configuration for a pgBackRest dedicated repository host. This section is only applicable if at least one "volume" (i.e. PVC-based) repository is defined in the "repos" section, therefore enabling a dedicated repository host Deployment. | false |
| restore | object | Defines details for performing an in-place restore using pgBackRest | false |
| sidecars | object | Configuration for pgBackRest sidecar containers | false |
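A sketch of a `spec.backups.pgbackrest` block that exercises several of the fields above. The repository name, retention settings, and storage size are hypothetical; the `global` keys are ordinary pgBackRest options passed through verbatim:

```yaml
backups:
  pgbackrest:
    global:
      repo1-retention-full: "14"        # pgBackRest option, merged into the "global" section
      repo1-retention-full-type: time   # retain full backups for 14 days
    repos:
      - name: repo1
        volume:
          volumeClaimSpec:              # PVC-based repo; enables a dedicated repository host
            accessModes: [ReadWriteOnce]
            resources:
              requests:
                storage: 5Gi            # hypothetical size
```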

### PostgresCluster.spec.backups.pgbackrest.repos[index]

↩ Parent

PGBackRestRepo represents a pgBackRest repository. Only one of its members may be specified.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | The name of the repository | true |
| azure | object | Represents a pgBackRest repository that is created using Azure storage | false |
| gcs | object | Represents a pgBackRest repository that is created using Google Cloud Storage | false |
| s3 | object | RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) storage | false |
| schedules | object | Defines the schedules for the pgBackRest backups. Full, Differential and Incremental backup types are supported: https://pgbackrest.org/user-guide.html#concept/backup | false |
| volume | object | Represents a pgBackRest repository that is created using a PersistentVolumeClaim | false |

### PostgresCluster.spec.backups.pgbackrest.repos[index].azure

↩ Parent

Represents a pgBackRest repository that is created using Azure storage

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| container | string | The Azure container utilized for the repository | true |

### PostgresCluster.spec.backups.pgbackrest.repos[index].gcs

↩ Parent

Represents a pgBackRest repository that is created using Google Cloud Storage

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| bucket | string | The GCS bucket utilized for the repository | true |

### PostgresCluster.spec.backups.pgbackrest.repos[index].s3

↩ Parent

RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) storage

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| bucket | string | The S3 bucket utilized for the repository | true |
| endpoint | string | A valid endpoint corresponding to the specified region | true |
| region | string | The region corresponding to the S3 bucket | true |
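For cloud repositories, `azure`, `gcs`, and `s3` follow the same pattern. A hypothetical S3 repository might look like the following, with credentials typically supplied separately through the `configuration` projected volumes described above:

```yaml
repos:
  - name: repo2
    s3:
      bucket: example-pgbackrest-bucket    # hypothetical bucket name
      endpoint: s3.us-east-1.amazonaws.com
      region: us-east-1
```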

### PostgresCluster.spec.backups.pgbackrest.repos[index].schedules

↩ Parent

Defines the schedules for the pgBackRest backups. Full, Differential and Incremental backup types are supported: https://pgbackrest.org/user-guide.html#concept/backup

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| differential | string | Defines the Cron schedule for a differential pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax | false |
| full | string | Defines the Cron schedule for a full pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax | false |
| incremental | string | Defines the Cron schedule for an incremental pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax | false |
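Since each schedule is an ordinary Cron expression, a repository can combine backup types. The cadence below is a hypothetical example, not a recommendation:

```yaml
repos:
  - name: repo1
    schedules:
      full: "0 1 * * 0"            # weekly full backup, Sunday 01:00
      differential: "0 1 * * 1-6"  # differential backups the other six days
      incremental: "0 */4 * * *"   # incremental backup every four hours
```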

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume

↩ Parent

Represents a pgBackRest repository that is created using a PersistentVolumeClaim

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| volumeClaimSpec | object | Defines a PersistentVolumeClaim spec used to create and/or bind a volume | true |

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec

↩ Parent

Defines a PersistentVolumeClaim spec used to create and/or bind a volume

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| accessModes | []string | accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | true |
| resources | object | resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources | true |
| dataSource | object | dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. | false |
| dataSourceRef | object | dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. | false |
| selector | object | selector is a label query over volumes to consider for binding. | false |
| storageClassName | string | storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 | false |
| volumeMode | string | volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. | false |
| volumeName | string | volumeName is the binding reference to the PersistentVolume backing this claim. | false |
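A typical PVC spec for a volume-backed repository, shown as a sketch; the StorageClass name and size are hypothetical:

```yaml
volume:
  volumeClaimSpec:
    accessModes:
      - ReadWriteOnce
    storageClassName: standard   # hypothetical StorageClass
    resources:
      requests:
        storage: 10Gi
```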

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.resources

↩ Parent

resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | true |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.dataSource

↩ Parent

dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.dataSourceRef

↩ Parent

dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |
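When pre-populating a repository volume, `dataSourceRef` can point at, for example, a VolumeSnapshot. The snapshot name below is hypothetical, and the AnyVolumeDataSource caveats above apply:

```yaml
volumeClaimSpec:
  accessModes: [ReadWriteOnce]
  dataSourceRef:
    apiGroup: snapshot.storage.k8s.io
    kind: VolumeSnapshot
    name: repo1-snapshot     # hypothetical existing snapshot
  resources:
    requests:
      storage: 10Gi
```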

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.selector

↩ Parent

selector is a label query over volumes to consider for binding.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repos[index].volume.volumeClaimSpec.selector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index]

↩ Parent

Projection that may be projected along with other supported volume types

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| configMap | object | configMap information about the configMap data to project | false |
| downwardAPI | object | downwardAPI information about the downwardAPI data to project | false |
| secret | object | secret information about the secret data to project | false |
| serviceAccountToken | object | serviceAccountToken is information about the serviceAccountToken data to project | false |
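Each `configuration` entry is a standard projected-volume source, so custom pgBackRest settings and credentials can be layered in from Secrets and ConfigMaps. The object names here are hypothetical:

```yaml
configuration:
  - secret:
      name: pgbackrest-secrets      # hypothetical Secret, e.g. holding S3 credentials
  - configMap:
      name: pgbackrest-extra-conf   # hypothetical ConfigMap with additional settings
      optional: true
```

All projected files land under "/etc/pgbackrest/conf.d" alongside the configuration the operator generates.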

### PostgresCluster.spec.backups.pgbackrest.configuration[index].configMap

↩ Parent

configMap information about the configMap data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional specify whether the ConfigMap or its keys must be defined | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].configMap.items[index]

↩ Parent

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].downwardAPI

↩ Parent

downwardAPI information about the downwardAPI data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | Items is a list of DownwardAPIVolume files | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].downwardAPI.items[index]

↩ Parent

DownwardAPIVolumeFile represents information to create the file containing the pod field

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| path | string | Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' | true |
| fieldRef | object | Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. | false |
| mode | integer | Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| resourceFieldRef | object | Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].downwardAPI.items[index].fieldRef

↩ Parent

Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| fieldPath | string | Path of the field to select in the specified API version. | true |
| apiVersion | string | Version of the schema the FieldPath is written in terms of, defaults to "v1". | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].downwardAPI.items[index].resourceFieldRef

↩ Parent

Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| resource | string | Required: resource to select | true |
| containerName | string | Container name: required for volumes, optional for env vars | false |
| divisor | int or string | Specifies the output format of the exposed resources, defaults to "1" | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].secret

↩ Parent

secret information about the secret data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional field specify whether the Secret or its key must be defined | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].secret.items[index]

↩ Parent

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PostgresCluster.spec.backups.pgbackrest.configuration[index].serviceAccountToken

↩ Parent

serviceAccountToken is information about the serviceAccountToken data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| path | string | path is the path relative to the mount point of the file to project the token into. | true |
| audience | string | audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | false |
| expirationSeconds | integer | expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs

↩ Parent

Jobs field allows configuration for all backup jobs

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| affinity | object | Scheduling constraints of pgBackRest backup Job pods. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| priorityClassName | string | Priority class name for the pgBackRest backup Job pods. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ | false |
| resources | object | Resource limits for backup jobs. Includes manual, scheduled and replica create backups | false |
| tolerations | []object | Tolerations of pgBackRest backup Job pods. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |
| ttlSecondsAfterFinished | integer | Limit the lifetime of a Job that has finished. More info: https://kubernetes.io/docs/concepts/workloads/controllers/job | false |
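A sketch of the `jobs` block; the PriorityClass name and resource figures are hypothetical:

```yaml
jobs:
  ttlSecondsAfterFinished: 600    # delete finished backup Jobs after ten minutes
  priorityClassName: batch-low    # hypothetical PriorityClass
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      memory: 256Mi
```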

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity

↩ Parent

Scheduling constraints of pgBackRest backup Job pods. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| nodeAffinity | object | Describes node affinity scheduling rules for the pod. | false |
| podAffinity | object | Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). | false |
| podAntiAffinity | object | Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity

↩ Parent

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |
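These two fields compose in the usual Kubernetes way: the `required...` term is a hard filter, while each `preferred...` term adds its weight when matched. A hypothetical node-affinity block for backup Job pods (the label values are examples):

```yaml
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/arch
              operator: In
              values: [amd64]
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 50
        preference:
          matchExpressions:
            - key: topology.kubernetes.io/zone
              operator: In
              values: [us-east-1a]    # hypothetical preferred zone
```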

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

↩ Parent

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference

↩ Parent

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index]

↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index]

↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution

↩ Parent

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index]

↩ Parent

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index]

↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index]

↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity

↩ Parent

Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

↩ Parent

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

↩ Parent

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

↩ Parent

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `topologyKey` matches that of any node on which a pod of the set of pods is running

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity

↩ Parent

Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |
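Anti-affinity is commonly used to keep backup Job pods away from other pods of the same cluster. A hypothetical preferred rule keyed on the operator's cluster label might look like this (the label key and cluster name are assumptions):

```yaml
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname
          labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/cluster: hippo  # hypothetical cluster label
```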

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

↩ Parent

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

↩ Parent

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

↩ Parent

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `topologyKey` matches that of any node on which a pod of the set of pods is running

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.resources

↩ Parent

Resource limits for backup jobs. Includes manual, scheduled and replica create backups

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### PostgresCluster.spec.backups.pgbackrest.jobs.tolerations[index]

↩ Parent

The pod this Toleration is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| effect | string | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | false |
| key | string | Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. | false |
| operator | string | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. | false |
| tolerationSeconds | integer | TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. | false |
| value | string | Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. | false |
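A toleration sketch for running backup Jobs on tainted nodes; the taint key and value are hypothetical:

```yaml
tolerations:
  - key: dedicated        # hypothetical taint key
    operator: Equal
    value: backups
    effect: NoSchedule
```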

### PostgresCluster.spec.backups.pgbackrest.manual

↩ Parent

Defines details for manual pgBackRest backup Jobs

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| repoName | string | The name of the pgBackRest repo to run the backup command against. | true |
| options | []string | Command line options to include when running the pgBackRest backup command. https://pgbackrest.org/command.html#command-backup | false |
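A minimal manual-backup definition; `--type=full` is a standard pgBackRest backup option and `repo1` is a hypothetical repository name. Note that defining this section does not by itself run a backup; in current operator versions the Job is typically triggered by annotating the PostgresCluster (for example with the postgres-operator.crunchydata.com/pgbackrest-backup annotation):

```yaml
manual:
  repoName: repo1
  options:
    - --type=full
```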

### PostgresCluster.spec.backups.pgbackrest.metadata

↩ Parent

Metadata contains metadata for custom resources

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| annotations | map[string]string |  | false |
| labels | map[string]string |  | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost

↩ Parent

Defines configuration for a pgBackRest dedicated repository host. This section is only applicable if at least one "volume" (i.e. PVC-based) repository is defined in the "repos" section, therefore enabling a dedicated repository host Deployment.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| affinity | object | Scheduling constraints of the dedicated repo host pod. Changing this value causes the repo host to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| priorityClassName | string | Priority class name for the pgBackRest repo host pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ | false |
| resources | object | Resource requirements for a pgBackRest repository host. | false |
| sshConfigMap | object | ConfigMap containing custom SSH configuration. Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. | false |
| sshSecret | object | Secret containing custom SSH keys. Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. | false |
| tolerations | []object | Tolerations of the pgBackRest repo host pod. Changing this value causes a restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |
| topologySpreadConstraints | []object | Topology spread constraints of the dedicated repo host pod. Changing this value causes the repo host to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | false |
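
Pulling a few of these fields together, a repo host stanza might look like the following sketch (the PriorityClass name and taint key are hypothetical and would need to exist in your cluster):

```yaml
spec:
  backups:
    pgbackrest:
      repoHost:
        priorityClassName: backups-high-priority    # hypothetical PriorityClass
        resources:
          requests:
            cpu: 100m
            memory: 256Mi
        tolerations:
          - key: example.com/backups-only           # hypothetical taint
            operator: Exists
            effect: NoSchedule
```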

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity

↩ Parent

Scheduling constraints of the dedicated repo host pod. Changing this value causes the repo host to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| nodeAffinity | object | Describes node affinity scheduling rules for the pod. | false |
| podAffinity | object | Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). | false |
| podAntiAffinity | object | Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity

↩ Parent

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

↩ Parent

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference

↩ Parent

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index]

↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index]

↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution

↩ Parent

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index]

↩ Parent

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index]

↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index]

↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
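
To make the nodeAffinity shape above concrete, here is a sketch combining a required term with a preferred one; the zone values and the node label are hypothetical:

```yaml
spec:
  backups:
    pgbackrest:
      repoHost:
        affinity:
          nodeAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
              nodeSelectorTerms:                   # terms are ORed
                - matchExpressions:                # requirements within a term are ANDed
                    - key: topology.kubernetes.io/zone
                      operator: In
                      values: [us-east-1a, us-east-1b]     # hypothetical zones
            preferredDuringSchedulingIgnoredDuringExecution:
              - weight: 50                         # in the range 1-100
                preference:
                  matchExpressions:
                    - key: example.com/storage-optimized   # hypothetical node label
                      operator: Exists
```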

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity

↩ Parent

Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

↩ Parent

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

↩ Parent

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |
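
As a sketch of the preferred form, the following asks the scheduler to favor nodes already running pods with a hypothetical `app.kubernetes.io/name: example-cache` label:

```yaml
affinity:
  podAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname         # co-locate on the same node
          labelSelector:
            matchLabels:
              app.kubernetes.io/name: example-cache   # hypothetical workload to sit near
```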

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

↩ Parent

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `<topologyKey>` matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity

↩ Parent

Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

↩ Parent

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

↩ Parent

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

↩ Parent

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `<topologyKey>` matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |
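
For instance, a required anti-affinity term that keeps the repo host off nodes already running pods of a hypothetical `hippo` cluster could be sketched as follows; the label key is assumed to be one the operator applies to cluster pods:

```yaml
affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - topologyKey: kubernetes.io/hostname
        labelSelector:
          matchLabels:
            postgres-operator.crunchydata.com/cluster: hippo   # hypothetical cluster label
```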

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.resources

↩ Parent

Resource requirements for a pgBackRest repository host.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
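
A sketch of requests and limits for the repository host container; the quantities are arbitrary placeholders to size for your workload:

```yaml
spec:
  backups:
    pgbackrest:
      repoHost:
        resources:
          requests:           # minimum reserved for scheduling
            cpu: 250m
            memory: 512Mi
          limits:             # hard ceiling enforced at runtime
            cpu: "1"
            memory: 1Gi
```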

### PostgresCluster.spec.backups.pgbackrest.repoHost.sshConfigMap

↩ Parent

ConfigMap containing custom SSH configuration. Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional specifies whether the ConfigMap or its keys must be defined. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.sshConfigMap.items[index]

↩ Parent

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.sshSecret

↩ Parent

Secret containing custom SSH keys. Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional specifies whether the Secret or its key must be defined. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.sshSecret.items[index]

↩ Parent

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.tolerations[index]

↩ Parent

The pod this Toleration is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| effect | string | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | false |
| key | string | Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. | false |
| operator | string | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. | false |
| tolerationSeconds | integer | TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. | false |
| value | string | Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.topologySpreadConstraints[index]

↩ Parent

TopologySpreadConstraint specifies how to spread matching pods among the given topology.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| maxSkew | integer | MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: in this case, the global minimum is 1. \| zone1 \| zone2 \| zone3 \| \| P P \| P P \| P \| - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. | true |
| topologyKey | string | TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. if TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. | true |
| whenUnsatisfiable | string | WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: \| zone1 \| zone2 \| zone3 \| \| P P P \| P \| P \| If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. | true |
| labelSelector | object | LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. | false |
| minDomains | integer | MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or is greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: \| zone1 \| zone2 \| zone3 \| \| P P \| P P \| P P \| The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, a new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if the new Pod is scheduled to any of the three zones; it will violate MaxSkew. - This is an alpha field and requires enabling the MinDomainsInPodTopologySpread feature gate. | false |
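
A sketch of a single constraint spreading repo host pods across zones; the cluster label used in the selector is hypothetical:

```yaml
topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: topology.kubernetes.io/zone
    whenUnsatisfiable: ScheduleAnyway    # soft; use DoNotSchedule for a hard constraint
    labelSelector:
      matchLabels:
        postgres-operator.crunchydata.com/cluster: hippo   # hypothetical cluster label
```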

### PostgresCluster.spec.backups.pgbackrest.repoHost.topologySpreadConstraints[index].labelSelector

↩ Parent

LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.repoHost.topologySpreadConstraints[index].labelSelector.matchExpressions[index]

↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.restore

↩ Parent

Defines details for performing an in-place restore using pgBackRest.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| enabled | boolean | Whether or not in-place pgBackRest restores are enabled for this PostgresCluster. | true |
| repoName | string | The name of the pgBackRest repo within the source PostgresCluster that contains the backups that should be utilized to perform a pgBackRest restore when initializing the data source for the new PostgresCluster. | true |
| affinity | object | Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| clusterName | string | The name of an existing PostgresCluster to use as the data source for the new PostgresCluster. Defaults to the name of the PostgresCluster being created if not provided. | false |
| clusterNamespace | string | The namespace of the cluster specified as the data source using the clusterName field. Defaults to the namespace of the PostgresCluster being created if not provided. | false |
| options | []string | Command line options to include when running the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore | false |
| priorityClassName | string | Priority class name for the pgBackRest restore Job pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ | false |
| resources | object | Resource requirements for the pgBackRest restore Job. | false |
| tolerations | []object | Tolerations of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |
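
Putting the required fields together, an in-place point-in-time restore might be sketched like this (the target timestamp is a placeholder):

```yaml
spec:
  backups:
    pgbackrest:
      restore:
        enabled: true
        repoName: repo1
        options:
          - --type=time
          - --target="2024-01-01 00:00:00+00"   # placeholder PITR target
```

As with manual backups, defining this section is typically paired with an annotation that triggers the restore (at the time of writing, `postgres-operator.crunchydata.com/pgbackrest-restore`); consult the operator documentation for the exact workflow.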

### PostgresCluster.spec.backups.pgbackrest.restore.affinity

↩ Parent

Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| nodeAffinity | object | Describes node affinity scheduling rules for the pod. | false |
| podAffinity | object | Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). | false |
| podAntiAffinity | object | Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity

↩ Parent

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

↩ Parent

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference

↩ Parent

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index]

↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index]

↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution

↩ Parent

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index]

↩ Parent

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index]

↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index]

↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity

↩ Parent

Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

↩ Parent

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

↩ Parent

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

- PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector - ↩ Parent -

- - - -A label query over a set of resources, in this case pods. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
- - -

- PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector - ↩ Parent -

- - - -A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
- - -

- PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
- - -
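As a sketch of the required form, here is a hypothetical restore affinity that forces the restore Job onto a node already running pods of some application; the label and its value are illustrative:

```yaml
spec:
  backups:
    pgbackrest:
      restore:
        affinity:
          podAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
              - topologyKey: kubernetes.io/hostname  # required; may not be empty
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: my-app   # hypothetical label on the target pods
```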

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector ↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity ↩ Parent

Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
|------|------|-------------|----------|
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm ↩ Parent

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector ↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector ↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.backups.pgbackrest.restore.resources ↩ Parent

Resource requirements for the pgBackRest restore Job.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
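A minimal sketch of sizing the restore Job; the quantities are illustrative only:

```yaml
spec:
  backups:
    pgbackrest:
      restore:
        resources:
          requests:
            cpu: 500m      # illustrative quantities
            memory: 512Mi
          limits:
            cpu: "1"
            memory: 1Gi
```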

### PostgresCluster.spec.backups.pgbackrest.restore.tolerations[index] ↩ Parent

The pod this Toleration is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| effect | string | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | false |
| key | string | Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. | false |
| operator | string | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. | false |
| tolerationSeconds | integer | TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. | false |
| value | string | Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. | false |
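A hedged example of tolerations on the restore Job; both taint keys are hypothetical:

```yaml
spec:
  backups:
    pgbackrest:
      restore:
        tolerations:
          # Tolerate a hypothetical dedicated=backups:NoSchedule taint.
          - key: dedicated
            operator: Equal
            value: backups
            effect: NoSchedule
          # Stay on a node for up to an hour after a NoExecute taint appears.
          - key: maintenance
            operator: Exists
            effect: NoExecute
            tolerationSeconds: 3600
```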

### PostgresCluster.spec.backups.pgbackrest.sidecars ↩ Parent

Configuration for pgBackRest sidecar containers.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| pgbackrest | object | Defines the configuration for the pgBackRest sidecar container. | false |
| pgbackrestConfig | object | Defines the configuration for the pgBackRest config sidecar container. | false |

### PostgresCluster.spec.backups.pgbackrest.sidecars.pgbackrest ↩ Parent

Defines the configuration for the pgBackRest sidecar container.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| resources | object | Resource requirements for a sidecar container. | false |

### PostgresCluster.spec.backups.pgbackrest.sidecars.pgbackrest.resources ↩ Parent

Resource requirements for a sidecar container.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### PostgresCluster.spec.backups.pgbackrest.sidecars.pgbackrestConfig ↩ Parent

Defines the configuration for the pgBackRest config sidecar container.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| resources | object | Resource requirements for a sidecar container. | false |

### PostgresCluster.spec.backups.pgbackrest.sidecars.pgbackrestConfig.resources ↩ Parent

Resource requirements for a sidecar container.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
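Putting the two sidecar tables together, a sketch with illustrative quantities:

```yaml
spec:
  backups:
    pgbackrest:
      sidecars:
        pgbackrest:
          resources:
            requests: { cpu: 100m, memory: 128Mi }  # illustrative quantities
        pgbackrestConfig:
          resources:
            limits: { memory: 64Mi }
```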

### PostgresCluster.spec.instances[index] ↩ Parent

| Name | Type | Description | Required |
|------|------|-------------|----------|
| dataVolumeClaimSpec | object | Defines a PersistentVolumeClaim for PostgreSQL data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes | true |
| affinity | object | Scheduling constraints of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| containers | []object | Custom sidecars for PostgreSQL instance pods. Changing this value causes PostgreSQL to restart. | false |
| metadata | object | Metadata contains metadata for custom resources. | false |
| minAvailable | int or string | Minimum number of pods that should be available at a time. Defaults to one when the replicas field is greater than one. | false |
| name | string | Name that associates this set of PostgreSQL pods. This field is optional when only one instance set is defined. Each instance set in a cluster must have a unique name. The combined length of this and the cluster name must be 46 characters or less. | false |
| priorityClassName | string | Priority class name for the PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ | false |
| replicas | integer | Number of desired PostgreSQL pods. | false |
| resources | object | Compute resources of a PostgreSQL container. | false |
| sidecars | object | Configuration for instance sidecar containers. | false |
| tablespaceVolumes | []object | The list of tablespace volumes to mount for this postgrescluster. This field requires enabling the TablespaceVolumes feature gate. | false |
| tolerations | []object | Tolerations of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |
| topologySpreadConstraints | []object | Topology spread constraints of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | false |
| walVolumeClaimSpec | object | Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. More info: https://www.postgresql.org/docs/current/wal.html | false |
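A compact sketch of one instance set using several of the fields above; the name and sizes are illustrative:

```yaml
spec:
  instances:
    - name: instance1      # must be unique within the cluster
      replicas: 2
      minAvailable: 1
      dataVolumeClaimSpec:
        accessModes: [ReadWriteOnce]
        resources:
          requests:
            storage: 10Gi  # illustrative size
      resources:
        requests: { cpu: "1", memory: 2Gi }
```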

### PostgresCluster.spec.instances[index].dataVolumeClaimSpec ↩ Parent

Defines a PersistentVolumeClaim for PostgreSQL data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes

| Name | Type | Description | Required |
|------|------|-------------|----------|
| accessModes | []string | accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | true |
| resources | object | resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources | true |
| dataSource | object | dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. | false |
| dataSourceRef | object | dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. | false |
| selector | object | selector is a label query over volumes to consider for binding. | false |
| storageClassName | string | storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 | false |
| volumeMode | string | volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. | false |
| volumeName | string | volumeName is the binding reference to the PersistentVolume backing this claim. | false |
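Only accessModes and resources are required by this CRD; a minimal claim might look like the following, where the StorageClass name is hypothetical:

```yaml
dataVolumeClaimSpec:
  accessModes: [ReadWriteOnce]  # required
  resources:                    # required
    requests:
      storage: 20Gi
  storageClassName: fast-ssd    # hypothetical StorageClass
```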

### PostgresCluster.spec.instances[index].dataVolumeClaimSpec.resources ↩ Parent

resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources

| Name | Type | Description | Required |
|------|------|-------------|----------|
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | true |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### PostgresCluster.spec.instances[index].dataVolumeClaimSpec.dataSource ↩ Parent

dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| kind | string | Kind is the type of resource being referenced. | true |
| name | string | Name is the name of resource being referenced. | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |

### PostgresCluster.spec.instances[index].dataVolumeClaimSpec.dataSourceRef ↩ Parent

dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| kind | string | Kind is the type of resource being referenced. | true |
| name | string | Name is the name of resource being referenced. | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |
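As a sketch, dataSourceRef can point a new data volume at an existing VolumeSnapshot; the snapshot name here is hypothetical and this assumes a CSI driver with snapshot support installed in the cluster:

```yaml
dataVolumeClaimSpec:
  accessModes: [ReadWriteOnce]
  resources:
    requests:
      storage: 10Gi
  dataSourceRef:
    apiGroup: snapshot.storage.k8s.io
    kind: VolumeSnapshot
    name: hippo-data-snapshot  # hypothetical snapshot name
```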

### PostgresCluster.spec.instances[index].dataVolumeClaimSpec.selector ↩ Parent

selector is a label query over volumes to consider for binding.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.instances[index].dataVolumeClaimSpec.selector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.instances[index].affinity ↩ Parent

Scheduling constraints of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node

| Name | Type | Description | Required |
|------|------|-------------|----------|
| nodeAffinity | object | Describes node affinity scheduling rules for the pod. | false |
| podAffinity | object | Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). | false |
| podAntiAffinity | object | Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). | false |

### PostgresCluster.spec.instances[index].affinity.nodeAffinity ↩ Parent

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |

### PostgresCluster.spec.instances[index].affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

### PostgresCluster.spec.instances[index].affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference ↩ Parent

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.instances[index].affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.instances[index].affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.instances[index].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution ↩ Parent

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

### PostgresCluster.spec.instances[index].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] ↩ Parent

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.instances[index].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.instances[index].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
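A sketch of a preferred node affinity on an instance set; the node label key is hypothetical:

```yaml
spec:
  instances:
    - name: instance1
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 50  # 1-100
              preference:
                matchExpressions:
                  - key: example.com/postgres  # hypothetical node label
                    operator: Exists
```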

### PostgresCluster.spec.instances[index].affinity.podAffinity ↩ Parent

Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
|------|------|-------------|----------|
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm ↩ Parent

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector ↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.instances[index].affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector ↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.instances[index].affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
|------|------|-------------|----------|
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.instances[index].affinity.podAntiAffinity ↩ Parent

Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
|------|------|-------------|----------|
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

- PostgresCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
- - -

- PostgresCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm - ↩ Parent -

- - - -Required. A pod affinity term, associated with the corresponding weight. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
- - -

- ### PostgresCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector ↩ Parent

- A label query over a set of resources, in this case pods.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
- | matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

- ### PostgresCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] ↩ Parent

- A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | key | string | key is the label key that the selector applies to. | true |
- | operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
- | values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

- ### PostgresCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector ↩ Parent

- A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
- | matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

- ### PostgresCluster.spec.instances[index].affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] ↩ Parent

- A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | key | string | key is the label key that the selector applies to. | true |
- | operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
- | values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

- ### PostgresCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

- Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
- | labelSelector | object | A label query over a set of resources, in this case pods. | false |
- | namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
- | namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

- ### PostgresCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector ↩ Parent

- A label query over a set of resources, in this case pods.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
- | matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

- ### PostgresCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] ↩ Parent

- A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | key | string | key is the label key that the selector applies to. | true |
- | operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
- | values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

- ### PostgresCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector ↩ Parent

- A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
- | matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

- ### PostgresCluster.spec.instances[index].affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] ↩ Parent

- A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | key | string | key is the label key that the selector applies to. | true |
- | operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
- | values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
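
These affinity entries mirror the upstream Kubernetes pod affinity API, so familiar manifest patterns carry over directly. As an illustrative sketch only (the instance set name, replica count, and cluster label value are hypothetical), a preferred anti-affinity rule that spreads instance pods across nodes could look like:

```yaml
spec:
  instances:
    - name: instance1          # hypothetical instance set name
      replicas: 3
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100      # 1-100; summed per node across matching terms
              podAffinityTerm:
                topologyKey: kubernetes.io/hostname   # spread across nodes
                labelSelector:
                  matchLabels:
                    postgres-operator.crunchydata.com/cluster: hippo   # hypothetical cluster name
```

Using `preferredDuringSchedulingIgnoredDuringExecution` rather than the `required` variant keeps pods schedulable on a cluster with fewer nodes than replicas.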

- ### PostgresCluster.spec.instances[index].containers[index] ↩ Parent

- A single application container that you want to run within a pod.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | name | string | Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. | true |
- | args | []string | Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell | false |
- | command | []string | Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell | false |
- | env | []object | List of environment variables to set in the container. Cannot be updated. | false |
- | envFrom | []object | List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. | false |
- | image | string | Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. | false |
- | imagePullPolicy | string | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images | false |
- | lifecycle | object | Actions that the management system should take in response to container lifecycle events. Cannot be updated. | false |
- | livenessProbe | object | Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |
- | ports | []object | List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. | false |
- | readinessProbe | object | Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |
- | resources | object | Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
- | securityContext | object | SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | false |
- | startupProbe | object | StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |
- | stdin | boolean | Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. | false |
- | stdinOnce | boolean | Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false. | false |
- | terminationMessagePath | string | Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. | false |
- | terminationMessagePolicy | string | Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. | false |
- | tty | boolean | Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. | false |
- | volumeDevices | []object | volumeDevices is the list of block devices to be used by the container. | false |
- | volumeMounts | []object | Pod volumes to mount into the container's filesystem. Cannot be updated. | false |
- | workingDir | string | Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. | false |
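
Only `name` is required for an entry here. A minimal sketch of a custom container added to an instance Pod, assuming a hypothetical image and port:

```yaml
containers:
  - name: example-sidecar                         # required; a unique DNS_LABEL
    image: registry.example.com/sidecar:latest    # hypothetical image
    command: ["sleep"]                            # exec'd directly, not via a shell
    args: ["infinity"]
    ports:
      - containerPort: 9090                       # informational; does not gate exposure
```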

- ### PostgresCluster.spec.instances[index].containers[index].env[index] ↩ Parent

- EnvVar represents an environment variable present in a Container.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | name | string | Name of the environment variable. Must be a C_IDENTIFIER. | true |
- | value | string | Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". | false |
- | valueFrom | object | Source for the environment variable's value. Cannot be used if value is not empty. | false |

- ### PostgresCluster.spec.instances[index].containers[index].env[index].valueFrom ↩ Parent

- Source for the environment variable's value. Cannot be used if value is not empty.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | configMapKeyRef | object | Selects a key of a ConfigMap. | false |
- | fieldRef | object | Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. | false |
- | resourceFieldRef | object | Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. | false |
- | secretKeyRef | object | Selects a key of a secret in the pod's namespace | false |

- ### PostgresCluster.spec.instances[index].containers[index].env[index].valueFrom.configMapKeyRef ↩ Parent

- Selects a key of a ConfigMap.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | key | string | The key to select. | true |
- | name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
- | optional | boolean | Specify whether the ConfigMap or its key must be defined | false |

- ### PostgresCluster.spec.instances[index].containers[index].env[index].valueFrom.fieldRef ↩ Parent

- Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | fieldPath | string | Path of the field to select in the specified API version. | true |
- | apiVersion | string | Version of the schema the FieldPath is written in terms of, defaults to "v1". | false |

- ### PostgresCluster.spec.instances[index].containers[index].env[index].valueFrom.resourceFieldRef ↩ Parent

- Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | resource | string | Required: resource to select | true |
- | containerName | string | Container name: required for volumes, optional for env vars | false |
- | divisor | int or string | Specifies the output format of the exposed resources, defaults to "1" | false |

- ### PostgresCluster.spec.instances[index].containers[index].env[index].valueFrom.secretKeyRef ↩ Parent

- Selects a key of a secret in the pod's namespace

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | key | string | The key of the secret to select from. Must be a valid secret key. | true |
- | name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
- | optional | boolean | Specify whether the Secret or its key must be defined | false |
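
Putting the `env` entry and its `valueFrom` sources together, a sketch covering the three common shapes (the variable names and the Secret name/key are hypothetical):

```yaml
env:
  - name: GREETING
    value: hello                        # a literal value
  - name: POD_NAMESPACE
    valueFrom:
      fieldRef:
        fieldPath: metadata.namespace   # downward API field of the pod
  - name: API_TOKEN
    valueFrom:
      secretKeyRef:
        name: example-secret            # hypothetical Secret in the pod's namespace
        key: token
        optional: true                  # tolerate a missing Secret or key
```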

- ### PostgresCluster.spec.instances[index].containers[index].envFrom[index] ↩ Parent

- EnvFromSource represents the source of a set of ConfigMaps

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | configMapRef | object | The ConfigMap to select from | false |
- | prefix | string | An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. | false |
- | secretRef | object | The Secret to select from | false |

- ### PostgresCluster.spec.instances[index].containers[index].envFrom[index].configMapRef ↩ Parent

- The ConfigMap to select from

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
- | optional | boolean | Specify whether the ConfigMap must be defined | false |

- ### PostgresCluster.spec.instances[index].containers[index].envFrom[index].secretRef ↩ Parent

- The Secret to select from

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
- | optional | boolean | Specify whether the Secret must be defined | false |
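
For bulk injection, `envFrom` pulls every key from a ConfigMap or Secret rather than naming variables one by one. A sketch with hypothetical resource names:

```yaml
envFrom:
  - prefix: CFG_              # optional; must be a C_IDENTIFIER
    configMapRef:
      name: example-config    # hypothetical ConfigMap
      optional: true
  - secretRef:
      name: example-secret    # on duplicate keys, the last source wins
```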

- ### PostgresCluster.spec.instances[index].containers[index].lifecycle ↩ Parent

- Actions that the management system should take in response to container lifecycle events. Cannot be updated.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | postStart | object | PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks | false |
- | preStop | object | PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks | false |

- ### PostgresCluster.spec.instances[index].containers[index].lifecycle.postStart ↩ Parent

- PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | exec | object | Exec specifies the action to take. | false |
- | httpGet | object | HTTPGet specifies the http request to perform. | false |
- | tcpSocket | object | Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept for backward compatibility. There is no validation of this field, and lifecycle hooks will fail at runtime if a tcp handler is specified. | false |

- ### PostgresCluster.spec.instances[index].containers[index].lifecycle.postStart.exec ↩ Parent

- Exec specifies the action to take.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | command | []string | Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('\|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. | false |

- ### PostgresCluster.spec.instances[index].containers[index].lifecycle.postStart.httpGet ↩ Parent

- HTTPGet specifies the http request to perform.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | port | int or string | Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
- | host | string | Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. | false |
- | httpHeaders | []object | Custom headers to set in the request. HTTP allows repeated headers. | false |
- | path | string | Path to access on the HTTP server. | false |
- | scheme | string | Scheme to use for connecting to the host. Defaults to HTTP. | false |

- ### PostgresCluster.spec.instances[index].containers[index].lifecycle.postStart.httpGet.httpHeaders[index] ↩ Parent

- HTTPHeader describes a custom header to be used in HTTP probes

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | name | string | The header field name | true |
- | value | string | The header field value | true |

- ### PostgresCluster.spec.instances[index].containers[index].lifecycle.postStart.tcpSocket ↩ Parent

- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept for backward compatibility. There is no validation of this field, and lifecycle hooks will fail at runtime if a tcp handler is specified.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | port | int or string | Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
- | host | string | Optional: Host name to connect to, defaults to the pod IP. | false |

- ### PostgresCluster.spec.instances[index].containers[index].lifecycle.preStop ↩ Parent

- PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | exec | object | Exec specifies the action to take. | false |
- | httpGet | object | HTTPGet specifies the http request to perform. | false |
- | tcpSocket | object | Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept for backward compatibility. There is no validation of this field, and lifecycle hooks will fail at runtime if a tcp handler is specified. | false |

- ### PostgresCluster.spec.instances[index].containers[index].lifecycle.preStop.exec ↩ Parent

- Exec specifies the action to take.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | command | []string | Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('\|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. | false |

- ### PostgresCluster.spec.instances[index].containers[index].lifecycle.preStop.httpGet ↩ Parent

- HTTPGet specifies the http request to perform.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | port | int or string | Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
- | host | string | Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. | false |
- | httpHeaders | []object | Custom headers to set in the request. HTTP allows repeated headers. | false |
- | path | string | Path to access on the HTTP server. | false |
- | scheme | string | Scheme to use for connecting to the host. Defaults to HTTP. | false |

- ### PostgresCluster.spec.instances[index].containers[index].lifecycle.preStop.httpGet.httpHeaders[index] ↩ Parent

- HTTPHeader describes a custom header to be used in HTTP probes

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | name | string | The header field name | true |
- | value | string | The header field value | true |

- ### PostgresCluster.spec.instances[index].containers[index].lifecycle.preStop.tcpSocket ↩ Parent

- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept for backward compatibility. There is no validation of this field, and lifecycle hooks will fail at runtime if a tcp handler is specified.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | port | int or string | Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
- | host | string | Optional: Host name to connect to, defaults to the pod IP. | false |
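
A sketch combining both hooks (the log path and shutdown endpoint are hypothetical). Note that `exec` commands are not run in a shell, so one must be invoked explicitly when shell features like redirection are needed:

```yaml
lifecycle:
  postStart:
    exec:
      # call a shell explicitly; exec alone would not handle the '>>' redirection
      command: ["/bin/sh", "-c", "echo started >> /tmp/lifecycle.log"]
  preStop:
    httpGet:
      path: /shutdown         # hypothetical endpoint
      port: 8080
      scheme: HTTP
```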

- ### PostgresCluster.spec.instances[index].containers[index].livenessProbe ↩ Parent

- Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | exec | object | Exec specifies the action to take. | false |
- | failureThreshold | integer | Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. | false |
- | grpc | object | GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. | false |
- | httpGet | object | HTTPGet specifies the http request to perform. | false |
- | initialDelaySeconds | integer | Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |
- | periodSeconds | integer | How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1. | false |
- | successThreshold | integer | Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. | false |
- | tcpSocket | object | TCPSocket specifies an action involving a TCP port. | false |
- | terminationGracePeriodSeconds | integer | Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. | false |
- | timeoutSeconds | integer | Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |

- ### PostgresCluster.spec.instances[index].containers[index].livenessProbe.exec ↩ Parent

- Exec specifies the action to take.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | command | []string | Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('\|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. | false |

- ### PostgresCluster.spec.instances[index].containers[index].livenessProbe.grpc ↩ Parent

- GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | port | integer | Port number of the gRPC service. Number must be in the range 1 to 65535. | true |
- | service | string | Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC. | false |

- ### PostgresCluster.spec.instances[index].containers[index].livenessProbe.httpGet ↩ Parent

- HTTPGet specifies the http request to perform.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | port | int or string | Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
- | host | string | Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. | false |
- | httpHeaders | []object | Custom headers to set in the request. HTTP allows repeated headers. | false |
- | path | string | Path to access on the HTTP server. | false |
- | scheme | string | Scheme to use for connecting to the host. Defaults to HTTP. | false |

- ### PostgresCluster.spec.instances[index].containers[index].livenessProbe.httpGet.httpHeaders[index] ↩ Parent

- HTTPHeader describes a custom header to be used in HTTP probes

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | name | string | The header field name | true |
- | value | string | The header field value | true |

- ### PostgresCluster.spec.instances[index].containers[index].livenessProbe.tcpSocket ↩ Parent

- TCPSocket specifies an action involving a TCP port.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | port | int or string | Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
- | host | string | Optional: Host name to connect to, defaults to the pod IP. | false |
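
Assembling these fields, a plausible HTTP liveness probe (the path and port are hypothetical). With the values shown, the container is restarted after roughly failureThreshold × periodSeconds = 3 × 10s = 30 seconds of consecutive failures:

```yaml
livenessProbe:
  httpGet:
    path: /healthz            # hypothetical endpoint
    port: 8080
  initialDelaySeconds: 10     # wait before the first probe
  periodSeconds: 10
  failureThreshold: 3         # ~30s of consecutive failures before restart
  timeoutSeconds: 1
```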

- ### PostgresCluster.spec.instances[index].containers[index].ports[index] ↩ Parent

- ContainerPort represents a network port in a single container.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | containerPort | integer | Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. | true |
- | hostIP | string | What host IP to bind the external port to. | false |
- | hostPort | integer | Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. | false |
- | name | string | If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. | false |
- | protocol | string | Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". | false |

- ### PostgresCluster.spec.instances[index].containers[index].readinessProbe ↩ Parent

- Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | exec | object | Exec specifies the action to take. | false |
- | failureThreshold | integer | Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. | false |
- | grpc | object | GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. | false |
- | httpGet | object | HTTPGet specifies the http request to perform. | false |
- | initialDelaySeconds | integer | Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |
- | periodSeconds | integer | How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1. | false |
- | successThreshold | integer | Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. | false |
- | tcpSocket | object | TCPSocket specifies an action involving a TCP port. | false |
- | terminationGracePeriodSeconds | integer | Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. | false |
- | timeoutSeconds | integer | Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |

- ### PostgresCluster.spec.instances[index].containers[index].readinessProbe.exec ↩ Parent

- Exec specifies the action to take.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | command | []string | Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('\|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. | false |

- ### PostgresCluster.spec.instances[index].containers[index].readinessProbe.grpc ↩ Parent

- GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | port | integer | Port number of the gRPC service. Number must be in the range 1 to 65535. | true |
- | service | string | Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC. | false |

- ### PostgresCluster.spec.instances[index].containers[index].readinessProbe.httpGet ↩ Parent

- HTTPGet specifies the http request to perform.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | port | int or string | Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
- | host | string | Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. | false |
- | httpHeaders | []object | Custom headers to set in the request. HTTP allows repeated headers. | false |
- | path | string | Path to access on the HTTP server. | false |
- | scheme | string | Scheme to use for connecting to the host. Defaults to HTTP. | false |

- ### PostgresCluster.spec.instances[index].containers[index].readinessProbe.httpGet.httpHeaders[index] ↩ Parent

- HTTPHeader describes a custom header to be used in HTTP probes

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | name | string | The header field name | true |
- | value | string | The header field value | true |

- ### PostgresCluster.spec.instances[index].containers[index].readinessProbe.tcpSocket ↩ Parent

- TCPSocket specifies an action involving a TCP port.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | port | int or string | Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
- | host | string | Optional: Host name to connect to, defaults to the pod IP. | false |
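
Readiness probes share the same shape; a TCP sketch (5432 here is purely illustrative). Unlike liveness and startup probes, `successThreshold` may exceed 1:

```yaml
readinessProbe:
  tcpSocket:
    port: 5432                # a number, or an IANA_SVC_NAME port name
  periodSeconds: 5
  successThreshold: 2         # must be 1 only for liveness and startup probes
```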

- ### PostgresCluster.spec.instances[index].containers[index].resources ↩ Parent

- Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
- | requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
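
Quantities accept integers or strings with the standard suffixes (m for millicores, Mi for mebibytes). A sketch with illustrative values:

```yaml
resources:
  requests:
    cpu: 500m                 # half a CPU core, guaranteed by the scheduler
    memory: 256Mi
  limits:
    cpu: "1"                  # quantities may be integers or strings
    memory: 512Mi
```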

- ### PostgresCluster.spec.instances[index].containers[index].securityContext ↩ Parent

- SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | allowPrivilegeEscalation | boolean | AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN. Note that this field cannot be set when spec.os.name is windows. | false |
- | capabilities | object | The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. | false |
- | privileged | boolean | Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. | false |
- | procMount | string | procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. | false |
- | readOnlyRootFilesystem | boolean | Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. | false |
- | runAsGroup | integer | The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. | false |
- | runAsNonRoot | boolean | Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. | false |
- | runAsUser | integer | The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. | false |
- | seLinuxOptions | object | The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. | false |
- | seccompProfile | object | The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. | false |
- | windowsOptions | object | The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. | false |

- ### PostgresCluster.spec.instances[index].containers[index].securityContext.capabilities ↩ Parent

- The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | add | []string | Added capabilities | false |
- | drop | []string | Removed capabilities | false |

- ### PostgresCluster.spec.instances[index].containers[index].securityContext.seLinuxOptions ↩ Parent

- The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | level | string | Level is SELinux level label that applies to the container. | false |
- | role | string | Role is a SELinux role label that applies to the container. | false |
- | type | string | Type is a SELinux type label that applies to the container. | false |
- | user | string | User is a SELinux user label that applies to the container. | false |

- ### PostgresCluster.spec.instances[index].containers[index].securityContext.seccompProfile ↩ Parent

- The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | type | string | type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. | true |
- | localhostProfile | string | localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". | false |

- ### PostgresCluster.spec.instances[index].containers[index].securityContext.windowsOptions ↩ Parent

- The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.

- | Name | Type | Description | Required |
- | --- | --- | --- | --- |
- | gmsaCredentialSpec | string | GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. | false |
- | gmsaCredentialSpecName | string | GMSACredentialSpecName is the name of the GMSA credential spec to use. | false |
- | hostProcess | boolean | HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. | false |
- | runAsUserName | string | The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. | false |
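
A hardened-container sketch drawing on several of these fields; whether these settings suit a given workload depends on the image:

```yaml
securityContext:
  allowPrivilegeEscalation: false   # sets no_new_privs on the container process
  readOnlyRootFilesystem: true
  runAsNonRoot: true                # kubelet refuses to start an image running as UID 0
  capabilities:
    drop: ["ALL"]                   # remove all Linux capabilities
  seccompProfile:
    type: RuntimeDefault            # the container runtime's default profile
```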

- PostgresCluster.spec.instances[index].containers[index].startupProbe - ↩ Parent -

- - - -StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
execobjectExec specifies the action to take.false
failureThresholdintegerMinimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.false
grpcobjectGRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.false
httpGetobjectHTTPGet specifies the http request to perform.false
initialDelaySecondsintegerNumber of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
periodSecondsintegerHow often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.false
successThresholdintegerMinimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.false
tcpSocketobjectTCPSocket specifies an action involving a TCP port.false
terminationGracePeriodSecondsintegerOptional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.false
timeoutSecondsintegerNumber of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probesfalse
- - -

- PostgresCluster.spec.instances[index].containers[index].startupProbe.exec - ↩ Parent -

- - - -Exec specifies the action to take. - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
command[]stringCommand is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.false
- - -

- PostgresCluster.spec.instances[index].containers[index].startupProbe.grpc - ↩ Parent -

- - - -GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
portintegerPort number of the gRPC service. Number must be in the range 1 to 65535.true
servicestringService is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC.false
- - -

- PostgresCluster.spec.instances[index].containers[index].startupProbe.httpGet - ↩ Parent -

- - - -HTTPGet specifies the http request to perform. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
portint or stringName or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.true
hoststringHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.false
httpHeaders[]objectCustom headers to set in the request. HTTP allows repeated headers.false
pathstringPath to access on the HTTP server.false
schemestringScheme to use for connecting to the host. Defaults to HTTP.false
- - -

### PostgresCluster.spec.instances[index].containers[index].startupProbe.httpGet.httpHeaders[index]

HTTPHeader describes a custom header to be used in HTTP probes.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | The header field name | true |
| value | string | The header field value | true |

### PostgresCluster.spec.instances[index].containers[index].startupProbe.tcpSocket

TCPSocket specifies an action involving a TCP port.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| port | int or string | Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
| host | string | Optional: Host name to connect to, defaults to the pod IP. | false |
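The probe handler objects above (exec, grpc, httpGet, tcpSocket) follow the standard Kubernetes probe shape. A minimal sketch of a custom instance container with a startupProbe; the cluster name, container name, image, and port are hypothetical:

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo                 # hypothetical cluster name
spec:
  instances:
    - name: instance1
      containers:
        - name: exporter      # hypothetical custom container
          image: example.com/exporter:latest   # hypothetical image
          startupProbe:
            tcpSocket:
              port: 9187      # hypothetical port; int or IANA_SVC_NAME
            periodSeconds: 10
            failureThreshold: 30   # allow up to 30 * periodSeconds for startup
```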

### PostgresCluster.spec.instances[index].containers[index].volumeDevices[index]

volumeDevice describes a mapping of a raw block device within a container.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| devicePath | string | devicePath is the path inside of the container that the device will be mapped to. | true |
| name | string | name must match the name of a persistentVolumeClaim in the pod | true |

### PostgresCluster.spec.instances[index].containers[index].volumeMounts[index]

VolumeMount describes a mounting of a Volume within a container.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| mountPath | string | Path within the container at which the volume should be mounted. Must not contain ':'. | true |
| name | string | This must match the Name of a Volume. | true |
| mountPropagation | string | mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. | false |
| readOnly | boolean | Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. | false |
| subPath | string | Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). | false |
| subPathExpr | string | Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. | false |
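A minimal sketch of a volume mount on a custom instance container; it assumes a pod volume named `tmp-scratch` exists, and both the volume name and mount path are hypothetical:

```yaml
# Fragment of a PostgresCluster manifest.
spec:
  instances:
    - name: instance1
      containers:
        - name: exporter                     # hypothetical custom container
          image: example.com/exporter:latest # hypothetical image
          volumeMounts:
            - name: tmp-scratch   # must match the Name of a pod volume (assumed to exist)
              mountPath: /scratch # must not contain ':'
              readOnly: true
```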

### PostgresCluster.spec.instances[index].metadata

Metadata contains metadata for custom resources.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| annotations | map[string]string |  | false |
| labels | map[string]string |  | false |

### PostgresCluster.spec.instances[index].resources

Compute resources of a PostgreSQL container.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
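Requests and limits take the usual Kubernetes quantity strings. A minimal sketch for an instance set; the quantities are illustrative:

```yaml
# Fragment of a PostgresCluster manifest.
spec:
  instances:
    - name: instance1
      resources:
        requests:
          cpu: "1"       # illustrative values
          memory: 2Gi
        limits:
          cpu: "2"
          memory: 4Gi
```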

### PostgresCluster.spec.instances[index].sidecars

Configuration for instance sidecar containers.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| replicaCertCopy | object | Defines the configuration for the replica cert copy sidecar container | false |

### PostgresCluster.spec.instances[index].sidecars.replicaCertCopy

Defines the configuration for the replica cert copy sidecar container.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| resources | object | Resource requirements for a sidecar container | false |

### PostgresCluster.spec.instances[index].sidecars.replicaCertCopy.resources

Resource requirements for a sidecar container.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
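The same requests/limits shape applies to the replica cert copy sidecar. An illustrative sketch with hypothetical quantities:

```yaml
# Fragment of a PostgresCluster manifest.
spec:
  instances:
    - name: instance1
      sidecars:
        replicaCertCopy:
          resources:
            requests:
              cpu: 50m      # illustrative values
              memory: 32Mi
            limits:
              memory: 64Mi
```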

### PostgresCluster.spec.instances[index].tablespaceVolumes[index]

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| dataVolumeClaimSpec | object | Defines a PersistentVolumeClaim for a tablespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes | true |
| name | string | The name for the tablespace, used as the path name for the volume. Must be unique in the instance set since they become the directory names. | true |

### PostgresCluster.spec.instances[index].tablespaceVolumes[index].dataVolumeClaimSpec

Defines a PersistentVolumeClaim for a tablespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| accessModes | []string | accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | false |
| dataSource | object | dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. | false |
| dataSourceRef | object | dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. | false |
| resources | object | resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources | false |
| selector | object | selector is a label query over volumes to consider for binding. | false |
| storageClassName | string | storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 | false |
| volumeMode | string | volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. | false |
| volumeName | string | volumeName is the binding reference to the PersistentVolume backing this claim. | false |
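A minimal sketch of a tablespace volume; the tablespace name `user` and the storage size are hypothetical:

```yaml
# Fragment of a PostgresCluster manifest.
spec:
  instances:
    - name: instance1
      tablespaceVolumes:
        - name: user        # becomes the directory name for the tablespace
          dataVolumeClaimSpec:
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: 1Gi   # illustrative size
```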

### PostgresCluster.spec.instances[index].tablespaceVolumes[index].dataVolumeClaimSpec.dataSource

dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |

### PostgresCluster.spec.instances[index].tablespaceVolumes[index].dataVolumeClaimSpec.dataSourceRef

dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |

### PostgresCluster.spec.instances[index].tablespaceVolumes[index].dataVolumeClaimSpec.resources

resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### PostgresCluster.spec.instances[index].tablespaceVolumes[index].dataVolumeClaimSpec.selector

selector is a label query over volumes to consider for binding.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.instances[index].tablespaceVolumes[index].dataVolumeClaimSpec.selector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.instances[index].tolerations[index]

The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| effect | string | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | false |
| key | string | Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. | false |
| operator | string | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. | false |
| tolerationSeconds | integer | TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. | false |
| value | string | Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. | false |
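A minimal sketch of a toleration on an instance set, assuming a hypothetical `dedicated=postgres:NoSchedule` taint on the target nodes:

```yaml
# Fragment of a PostgresCluster manifest.
spec:
  instances:
    - name: instance1
      tolerations:
        - key: dedicated      # hypothetical taint key
          operator: Equal
          value: postgres
          effect: NoSchedule
```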

### PostgresCluster.spec.instances[index].topologySpreadConstraints[index]

TopologySpreadConstraint specifies how to spread matching pods among the given topology.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| maxSkew | integer | MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. \| zone1 \| zone2 \| zone3 \| \| P P \| P P \| P \| - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. | true |
| topologyKey | string | TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. | true |
| whenUnsatisfiable | string | WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: \| zone1 \| zone2 \| zone3 \| \| P P P \| P \| P \| If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. | true |
| labelSelector | object | LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. | false |
| minDomains | integer | MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: \| zone1 \| zone2 \| zone3 \| \| P P \| P P \| P P \| The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - This is an alpha field and requires enabling the MinDomainsInPodTopologySpread feature gate. | false |

### PostgresCluster.spec.instances[index].topologySpreadConstraints[index].labelSelector

LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.instances[index].topologySpreadConstraints[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
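A minimal sketch spreading instance pods across zones; the label key is assumed to be the operator's cluster label, and the cluster name `hippo` is hypothetical:

```yaml
# Fragment of a PostgresCluster manifest.
spec:
  instances:
    - name: instance1
      replicas: 3
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: ScheduleAnyway
          labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/cluster: hippo  # assumed cluster label
```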

### PostgresCluster.spec.instances[index].walVolumeClaimSpec

Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. More info: https://www.postgresql.org/docs/current/wal.html

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| accessModes | []string | accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | true |
| resources | object | resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources | true |
| dataSource | object | dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. | false |
| dataSourceRef | object | dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. | false |
| selector | object | selector is a label query over volumes to consider for binding. | false |
| storageClassName | string | storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 | false |
| volumeMode | string | volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. | false |
| volumeName | string | volumeName is the binding reference to the PersistentVolume backing this claim. | false |

### PostgresCluster.spec.instances[index].walVolumeClaimSpec.resources

resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | true |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### PostgresCluster.spec.instances[index].walVolumeClaimSpec.dataSource

dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |

### PostgresCluster.spec.instances[index].walVolumeClaimSpec.dataSourceRef

dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |

### PostgresCluster.spec.instances[index].walVolumeClaimSpec.selector

selector is a label query over volumes to consider for binding.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.instances[index].walVolumeClaimSpec.selector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
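Note that, unlike most claim specs in this API, accessModes and resources (with requests) are required on walVolumeClaimSpec. A minimal sketch; the StorageClass name and size are hypothetical:

```yaml
# Fragment of a PostgresCluster manifest.
spec:
  instances:
    - name: instance1
      walVolumeClaimSpec:
        accessModes: ["ReadWriteOnce"]   # required
        resources:
          requests:                      # required
            storage: 1Gi                 # illustrative size
        storageClassName: fast-ssd       # hypothetical StorageClass
```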

### PostgresCluster.spec.config

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| files | []object |  | false |

### PostgresCluster.spec.config.files[index]

Projection that may be projected along with other supported volume types.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| configMap | object | configMap information about the configMap data to project | false |
| downwardAPI | object | downwardAPI information about the downwardAPI data to project | false |
| secret | object | secret information about the secret data to project | false |
| serviceAccountToken | object | serviceAccountToken is information about the serviceAccountToken data to project | false |

### PostgresCluster.spec.config.files[index].configMap

configMap information about the configMap data to project.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional specify whether the ConfigMap or its keys must be defined | false |

### PostgresCluster.spec.config.files[index].configMap.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PostgresCluster.spec.config.files[index].downwardAPI

downwardAPI information about the downwardAPI data to project.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | Items is a list of DownwardAPIVolume file | false |

### PostgresCluster.spec.config.files[index].downwardAPI.items[index]

DownwardAPIVolumeFile represents information to create the file containing the pod field.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| path | string | Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' | true |
| fieldRef | object | Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. | false |
| mode | integer | Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| resourceFieldRef | object | Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. | false |

### PostgresCluster.spec.config.files[index].downwardAPI.items[index].fieldRef

Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| fieldPath | string | Path of the field to select in the specified API version. | true |
| apiVersion | string | Version of the schema the FieldPath is written in terms of, defaults to "v1". | false |

### PostgresCluster.spec.config.files[index].downwardAPI.items[index].resourceFieldRef

Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| resource | string | Required: resource to select | true |
| containerName | string | Container name: required for volumes, optional for env vars | false |
| divisor | int or string | Specifies the output format of the exposed resources, defaults to "1" | false |

### PostgresCluster.spec.config.files[index].secret

secret information about the secret data to project.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional field specifies whether the Secret or its key must be defined | false |

### PostgresCluster.spec.config.files[index].secret.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PostgresCluster.spec.config.files[index].serviceAccountToken

serviceAccountToken is information about the serviceAccountToken data to project.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| path | string | path is the path relative to the mount point of the file to project the token into. | true |
| audience | string | audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | false |
| expirationSeconds | integer | expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes. | false |
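A minimal sketch projecting a ConfigMap and a Secret through spec.config.files; the referent names, keys, and paths are hypothetical:

```yaml
# Fragment of a PostgresCluster manifest.
spec:
  config:
    files:
      - configMap:
          name: pg-extra-config    # hypothetical ConfigMap
          optional: true
      - secret:
          name: pg-extra-secret    # hypothetical Secret
          items:
            - key: cert.pem
              path: certs/cert.pem # relative path inside the projected volume
```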

### PostgresCluster.spec.customReplicationTLSSecret

The secret containing the replication client certificates and keys for secure connections to the PostgreSQL server. It will need to contain the client TLS certificate, TLS key and the Certificate Authority certificate with the data keys set to tls.crt, tls.key and ca.crt, respectively. NOTE: If CustomReplicationClientTLSSecret is provided, CustomTLSSecret MUST be provided and the ca.crt provided must be the same.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional field specifies whether the Secret or its key must be defined | false |

### PostgresCluster.spec.customReplicationTLSSecret.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PostgresCluster.spec.customTLSSecret

The secret containing the Certificates and Keys to encrypt PostgreSQL traffic will need to contain the server TLS certificate, TLS key and the Certificate Authority certificate with the data keys set to tls.crt, tls.key and ca.crt, respectively. It will then be mounted as a volume projection to the '/pgconf/tls' directory. For more information on Kubernetes secret projections, please see https://k8s.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths NOTE: If CustomTLSSecret is provided, CustomReplicationClientTLSSecret MUST be provided and the ca.crt provided must be the same.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional field specifies whether the Secret or its key must be defined | false |

### PostgresCluster.spec.customTLSSecret.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
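A minimal sketch referencing both secrets; the Secret names are hypothetical, and each Secret is assumed to carry tls.crt, tls.key, and ca.crt with the same ca.crt in both:

```yaml
# Fragment of a PostgresCluster manifest.
spec:
  customTLSSecret:
    name: hippo-cluster-cert        # hypothetical Secret: tls.crt, tls.key, ca.crt
  customReplicationTLSSecret:
    name: hippo-replication-cert    # hypothetical Secret: must share the same ca.crt
```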

### PostgresCluster.spec.dataSource

Specifies a data source for bootstrapping the PostgreSQL cluster.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| pgbackrest | object | Defines a pgBackRest cloud-based data source that can be used to pre-populate the PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. The PGBackRest field is incompatible with the PostgresCluster field: only one data source can be used for pre-populating a new PostgreSQL cluster | false |
| postgresCluster | object | Defines a pgBackRest data source that can be used to pre-populate the PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. The PGBackRest field is incompatible with the PostgresCluster field: only one data source can be used for pre-populating a new PostgreSQL cluster | false |
| volumes | object | Defines any existing volumes to reuse for this PostgresCluster. | false |

### PostgresCluster.spec.dataSource.pgbackrest

Defines a pgBackRest cloud-based data source that can be used to pre-populate the PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. The PGBackRest field is incompatible with the PostgresCluster field: only one data source can be used for pre-populating a new PostgreSQL cluster.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| repo | object | Defines a pgBackRest repository | true |
| stanza | string | The name of an existing pgBackRest stanza to use as the data source for the new PostgresCluster. Defaults to `db` if not provided. | true |
| affinity | object | Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| configuration | []object | Projected volumes containing custom pgBackRest configuration. These files are mounted under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the PostgreSQL Operator: https://pgbackrest.org/configuration.html | false |
| global | map[string]string | Global pgBackRest configuration settings. These settings are included in the "global" section of the pgBackRest configuration generated by the PostgreSQL Operator, and then mounted under "/etc/pgbackrest/conf.d": https://pgbackrest.org/configuration.html | false |
| options | []string | Command line options to include when running the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore | false |
| priorityClassName | string | Priority class name for the pgBackRest restore Job pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ | false |
| resources | object | Resource requirements for the pgBackRest restore Job. | false |
| tolerations | []object | Tolerations of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |
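A minimal sketch of a cloud-based restore data source; the bucket, endpoint, credentials Secret, repo path, and recovery target are all hypothetical:

```yaml
# Fragment of a PostgresCluster manifest.
spec:
  dataSource:
    pgbackrest:
      stanza: db
      repo:
        name: repo1
        s3:
          bucket: my-bucket                       # hypothetical bucket
          endpoint: s3.us-east-1.amazonaws.com    # hypothetical endpoint
          region: us-east-1
      configuration:
        - secret:
            name: pgbackrest-s3-creds             # hypothetical Secret with S3 credentials
      global:
        repo1-path: /pgbackrest/old-cluster/repo1 # hypothetical repo path
      options:
        - --type=time
        - --target="2024-01-01 00:00:00+00"       # hypothetical recovery target
```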

### PostgresCluster.spec.dataSource.pgbackrest.repo

Defines a pgBackRest repository.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | The name of the repository | true |
| azure | object | Represents a pgBackRest repository that is created using Azure storage | false |
| gcs | object | Represents a pgBackRest repository that is created using Google Cloud Storage | false |
| s3 | object | RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) storage | false |
| schedules | object | Defines the schedules for the pgBackRest backups. Full, Differential and Incremental backup types are supported: https://pgbackrest.org/user-guide.html#concept/backup | false |
| volume | object | Represents a pgBackRest repository that is created using a PersistentVolumeClaim | false |

### PostgresCluster.spec.dataSource.pgbackrest.repo.azure

Represents a pgBackRest repository that is created using Azure storage.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| container | string | The Azure container utilized for the repository | true |

### PostgresCluster.spec.dataSource.pgbackrest.repo.gcs

Represents a pgBackRest repository that is created using Google Cloud Storage.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| bucket | string | The GCS bucket utilized for the repository | true |

### PostgresCluster.spec.dataSource.pgbackrest.repo.s3

RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) storage.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| bucket | string | The S3 bucket utilized for the repository | true |
| endpoint | string | A valid endpoint corresponding to the specified region | true |
| region | string | The region corresponding to the S3 bucket | true |

### PostgresCluster.spec.dataSource.pgbackrest.repo.schedules

Defines the schedules for the pgBackRest backups. Full, Differential and Incremental backup types are supported: https://pgbackrest.org/user-guide.html#concept/backup

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| differential | string | Defines the Cron schedule for a differential pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax | false |
| full | string | Defines the Cron schedule for a full pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax | false |
| incremental | string | Defines the Cron schedule for an incremental pgBackRest backup. Follows the standard Cron schedule syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax | false |
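Schedule strings use the standard five-field cron syntax. An illustrative sketch of a repository object with scheduled full, differential, and incremental backups; the times are hypothetical:

```yaml
# Fragment: a pgBackRest repository object with backup schedules.
repo:
  name: repo1
  schedules:
    full: "0 1 * * 0"            # 01:00 every Sunday
    differential: "0 1 * * 1-6"  # 01:00 Monday through Saturday
    incremental: "0 */4 * * *"   # every four hours
```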

### PostgresCluster.spec.dataSource.pgbackrest.repo.volume

Represents a pgBackRest repository that is created using a PersistentVolumeClaim.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| volumeClaimSpec | object | Defines a PersistentVolumeClaim spec used to create and/or bind a volume | true |

### PostgresCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec

Defines a PersistentVolumeClaim spec used to create and/or bind a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| accessModes | []string | accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | false |
| dataSource | object | dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. | false |
| dataSourceRef | object | dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. | false |
| resources | object | resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources | false |
| selector | object | selector is a label query over volumes to consider for binding. | false |
| storageClassName | string | storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 | false |
| volumeMode | string | volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. | false |
| volumeName | string | volumeName is the binding reference to the PersistentVolume backing this claim. | false |
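A minimal sketch of a volume-backed repository; the storage size and StorageClass name are hypothetical:

```yaml
# Fragment: a pgBackRest repository backed by a PersistentVolumeClaim.
repo:
  name: repo1
  volume:
    volumeClaimSpec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 10Gi            # illustrative size
      storageClassName: standard   # hypothetical StorageClass
```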

### PostgresCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.dataSource

dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |

### PostgresCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.dataSourceRef

dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |

### PostgresCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.resources

resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### PostgresCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.selector

selector is a label query over volumes to consider for binding.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.dataSource.pgbackrest.repo.volume.volumeClaimSpec.selector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity

Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| nodeAffinity | object | Describes node affinity scheduling rules for the pod. | false |
| podAffinity | object | Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). | false |
| podAntiAffinity | object | Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index]

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

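As a concrete reading of the node affinity schema above, this hedged sketch requires an SSD node and merely prefers a particular zone; the disktype label and the zone value are hypothetical:

```yaml
spec:
  dataSource:
    pgbackrest:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:            # terms are ORed
              - matchExpressions:         # requirements within a term are ANDed
                  - key: disktype         # hypothetical node label
                    operator: In
                    values: [ssd]
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 50                  # must be in the range 1-100
              preference:
                matchExpressions:
                  - key: topology.kubernetes.io/zone
                    operator: In
                    values: [us-east-1a]  # hypothetical zone
```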

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAffinity

Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `topologyKey` matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

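Read against the tables above, a required pod affinity term needs only a topologyKey and, typically, a labelSelector. This sketch co-locates the restore Job with pods carrying a hypothetical label, in the Job's own namespace:

```yaml
spec:
  dataSource:
    pgbackrest:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: kubernetes.io/hostname   # co-locate on the same node
              labelSelector:
                matchLabels:
                  app.example.com/component: cache  # hypothetical pod label
              # null namespaceSelector and empty namespaces = "this pod's namespace"
```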

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity

Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `topologyKey` matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.dataSource.pgbackrest.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

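Anti-affinity uses the same term structure. A common pattern, sketched here with a hypothetical cluster label, is a preferred rule that spreads matching pods across zones rather than hard-failing scheduling:

```yaml
spec:
  dataSource:
    pgbackrest:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                topologyKey: topology.kubernetes.io/zone  # spread across zones
                labelSelector:
                  matchExpressions:
                    - key: example.com/cluster            # hypothetical label
                      operator: In
                      values: [hippo]
```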

### PostgresCluster.spec.dataSource.pgbackrest.configuration[index]

Projection that may be projected along with other supported volume types.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| configMap | object | configMap information about the configMap data to project | false |
| downwardAPI | object | downwardAPI information about the downwardAPI data to project | false |
| secret | object | secret information about the secret data to project | false |
| serviceAccountToken | object | serviceAccountToken is information about the serviceAccountToken data to project | false |

### PostgresCluster.spec.dataSource.pgbackrest.configuration[index].configMap

configMap information about the configMap data to project.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional specify whether the ConfigMap or its keys must be defined | false |

### PostgresCluster.spec.dataSource.pgbackrest.configuration[index].configMap.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PostgresCluster.spec.dataSource.pgbackrest.configuration[index].downwardAPI

downwardAPI information about the downwardAPI data to project.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| items | []object | Items is a list of DownwardAPIVolume files | false |

### PostgresCluster.spec.dataSource.pgbackrest.configuration[index].downwardAPI.items[index]

DownwardAPIVolumeFile represents information to create the file containing the pod field.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| path | string | Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' | true |
| fieldRef | object | Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. | false |
| mode | integer | Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| resourceFieldRef | object | Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. | false |

### PostgresCluster.spec.dataSource.pgbackrest.configuration[index].downwardAPI.items[index].fieldRef

Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| fieldPath | string | Path of the field to select in the specified API version. | true |
| apiVersion | string | Version of the schema the FieldPath is written in terms of, defaults to "v1". | false |

### PostgresCluster.spec.dataSource.pgbackrest.configuration[index].downwardAPI.items[index].resourceFieldRef

Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| resource | string | Required: resource to select | true |
| containerName | string | Container name: required for volumes, optional for env vars | false |
| divisor | int or string | Specifies the output format of the exposed resources, defaults to "1" | false |

### PostgresCluster.spec.dataSource.pgbackrest.configuration[index].secret

secret information about the secret data to project.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional field specify whether the Secret or its key must be defined | false |

### PostgresCluster.spec.dataSource.pgbackrest.configuration[index].secret.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PostgresCluster.spec.dataSource.pgbackrest.configuration[index].serviceAccountToken

serviceAccountToken is information about the serviceAccountToken data to project.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| path | string | path is the path relative to the mount point of the file to project the token into. | true |
| audience | string | audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | false |
| expirationSeconds | integer | expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes. | false |

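Because each configuration entry is a standard projected volume source, Secrets and ConfigMaps can be combined in one list; the names and keys below are hypothetical, and the listed keys are projected as files for pgBackRest to read:

```yaml
spec:
  dataSource:
    pgbackrest:
      configuration:
        - secret:
            name: pgbackrest-s3-creds       # hypothetical Secret holding repository credentials
            items:
              - key: credentials
                path: s3.conf               # project this key as the file s3.conf
        - configMap:
            name: pgbackrest-extra-conf     # hypothetical ConfigMap
            optional: true                  # volume setup succeeds even if it is absent
```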

### PostgresCluster.spec.dataSource.pgbackrest.resources

Resource requirements for the pgBackRest restore Job.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

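The restore Job's resources block follows the usual Kubernetes requests/limits shape; the quantities in this sketch are hypothetical and should be sized to the restore workload:

```yaml
spec:
  dataSource:
    pgbackrest:
      resources:
        requests:
          cpu: 500m        # hypothetical
          memory: 256Mi    # hypothetical
        limits:
          memory: 1Gi      # hypothetical
```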

### PostgresCluster.spec.dataSource.pgbackrest.tolerations[index]

The pod this Toleration is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| effect | string | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | false |
| key | string | Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. | false |
| operator | string | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. | false |
| tolerationSeconds | integer | TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. | false |
| value | string | Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. | false |

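Each toleration matches a taint by key, operator, value, and effect; the taint names here are hypothetical. The second entry shows tolerationSeconds, which only applies to the NoExecute effect:

```yaml
spec:
  dataSource:
    pgbackrest:
      tolerations:
        - key: dedicated              # hypothetical taint key
          operator: Equal
          value: restore-jobs
          effect: NoSchedule
        - key: example.com/unstable   # hypothetical taint key
          operator: Exists
          effect: NoExecute
          tolerationSeconds: 600      # evict 10 minutes after the taint appears
```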

### PostgresCluster.spec.dataSource.postgresCluster

Defines a pgBackRest data source that can be used to pre-populate the PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. The PGBackRest field is incompatible with the PostgresCluster field: only one data source can be used for pre-populating a new PostgreSQL cluster.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| repoName | string | The name of the pgBackRest repo within the source PostgresCluster that contains the backups that should be utilized to perform a pgBackRest restore when initializing the data source for the new PostgresCluster. | true |
| affinity | object | Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| clusterName | string | The name of an existing PostgresCluster to use as the data source for the new PostgresCluster. Defaults to the name of the PostgresCluster being created if not provided. | false |
| clusterNamespace | string | The namespace of the cluster specified as the data source using the clusterName field. Defaults to the namespace of the PostgresCluster being created if not provided. | false |
| options | []string | Command line options to include when running the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore | false |
| priorityClassName | string | Priority class name for the pgBackRest restore Job pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ | false |
| resources | object | Resource requirements for the pgBackRest restore Job. | false |
| tolerations | []object | Tolerations of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |

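In practice only repoName is required; clusterName and clusterNamespace default to the new cluster's own name and namespace. This sketch clones an existing cluster to a point in time; the namespace and target timestamp are hypothetical, and the options are passed through to the pgbackrest restore command:

```yaml
spec:
  dataSource:
    postgresCluster:
      clusterName: hippo                    # existing source cluster
      clusterNamespace: postgres-operator   # hypothetical namespace of the source cluster
      repoName: repo1                       # pgBackRest repo in the source cluster
      options:
        - --type=time
        - --target="2024-01-01 00:00:00+00" # hypothetical PITR target
```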

### PostgresCluster.spec.dataSource.postgresCluster.affinity

Scheduling constraints of the pgBackRest restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| nodeAffinity | object | Describes node affinity scheduling rules for the pod. | false |
| podAffinity | object | Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). | false |
| podAntiAffinity | object | Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index]

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity

Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity

Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

#### PostgresCluster.spec.dataSource.postgresCluster.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
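
The selector tables above repeat for every affinity term, so a concrete manifest may be easier to read. Below is a minimal sketch of a clone via `dataSource.postgresCluster` whose restore Pod must not land on nodes already running Pods of the source cluster; the `clusterName`/`repoName` values and the label key/value are illustrative assumptions, not prescribed by this reference.

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo                      # illustrative
spec:
  dataSource:
    postgresCluster:
      clusterName: hippo-source    # illustrative source cluster
      repoName: repo1
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: kubernetes.io/hostname
            labelSelector:
              matchExpressions:
              - key: postgres-operator.crunchydata.com/cluster
                operator: In
                values: [hippo-source]
```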

#### PostgresCluster.spec.dataSource.postgresCluster.resources

Resource requirements for the pgBackRest restore Job.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
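
For instance, a spec fragment (quantities illustrative) that bounds the restore Job's compute:

```yaml
spec:
  dataSource:
    postgresCluster:
      clusterName: hippo-source    # illustrative
      repoName: repo1
      resources:
        requests: { cpu: 500m, memory: 512Mi }
        limits: { cpu: "1", memory: 1Gi }
```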

#### PostgresCluster.spec.dataSource.postgresCluster.tolerations[index]

The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| effect | string | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | false |
| key | string | Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. | false |
| operator | string | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. | false |
| tolerationSeconds | integer | TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. | false |
| value | string | Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. | false |
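
A minimal sketch of a toleration on the restore Job's Pod, assuming an illustrative `dedicated=postgres:NoSchedule` taint on the target nodes:

```yaml
spec:
  dataSource:
    postgresCluster:
      clusterName: hippo-source    # illustrative
      repoName: repo1
      tolerations:
      - key: dedicated             # illustrative taint key
        operator: Equal
        value: postgres
        effect: NoSchedule
```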

#### PostgresCluster.spec.dataSource.volumes

Defines any existing volumes to reuse for this PostgresCluster.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| pgBackRestVolume | object | Defines the existing pgBackRest repo volume and directory to use in the current PostgresCluster. | false |
| pgDataVolume | object | Defines the existing pgData volume and directory to use in the current PostgresCluster. | false |
| pgWALVolume | object | Defines the existing pg_wal volume and directory to use in the current PostgresCluster. Note that a defined pg_wal volume MUST be accompanied by a pgData volume. | false |

#### PostgresCluster.spec.dataSource.volumes.pgBackRestVolume

Defines the existing pgBackRest repo volume and directory to use in the current PostgresCluster.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| pvcName | string | The existing PVC name. | true |
| directory | string | The existing directory. When not set, a move Job is not created for the associated volume. | false |

#### PostgresCluster.spec.dataSource.volumes.pgDataVolume

Defines the existing pgData volume and directory to use in the current PostgresCluster.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| pvcName | string | The existing PVC name. | true |
| directory | string | The existing directory. When not set, a move Job is not created for the associated volume. | false |

#### PostgresCluster.spec.dataSource.volumes.pgWALVolume

Defines the existing pg_wal volume and directory to use in the current PostgresCluster. Note that a defined pg_wal volume MUST be accompanied by a pgData volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| pvcName | string | The existing PVC name. | true |
| directory | string | The existing directory. When not set, a move Job is not created for the associated volume. | false |
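
A sketch of adopting pre-existing PVCs; the PVC names and directories are illustrative, and per the tables above, setting `directory` is what causes a move Job to be created for that volume:

```yaml
spec:
  dataSource:
    volumes:
      pgDataVolume:
        pvcName: old-pgdata        # illustrative existing PVC
        directory: pgdata
      pgWALVolume:                 # MUST be accompanied by a pgData volume
        pvcName: old-pgwal
        directory: pgwal
      pgBackRestVolume:
        pvcName: old-pgbackrest
        directory: pgbackrest/repo1
```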

#### PostgresCluster.spec.databaseInitSQL

DatabaseInitSQL defines a ConfigMap containing custom SQL that will be run after the cluster is initialized. This ConfigMap must be in the same namespace as the cluster.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | Key is the ConfigMap data key that points to a SQL string | true |
| name | string | Name is the name of a ConfigMap | true |
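
A minimal sketch pairing the two required fields with an illustrative ConfigMap (name and SQL are placeholders):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: hippo-init-sql             # illustrative; same namespace as the cluster
data:
  init.sql: |
    CREATE SCHEMA IF NOT EXISTS app;
---
# ...and in the PostgresCluster spec:
spec:
  databaseInitSQL:
    name: hippo-init-sql
    key: init.sql
```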

#### PostgresCluster.spec.imagePullSecrets[index]

LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |

#### PostgresCluster.spec.metadata

Metadata contains metadata for custom resources

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| annotations | map[string]string |  | false |
| labels | map[string]string |  | false |
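
An illustrative fragment combining both fields; the Secret name, label, and annotation are placeholders:

```yaml
spec:
  imagePullSecrets:
  - name: private-registry-creds   # illustrative pull Secret
  metadata:
    labels:
      team: database               # illustrative custom label
    annotations:
      example.com/owner: dba       # illustrative custom annotation
```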

#### PostgresCluster.spec.monitoring

The specification of monitoring tools that connect to PostgreSQL

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| pgmonitor | object | PGMonitorSpec defines the desired state of the pgMonitor tool suite | false |

#### PostgresCluster.spec.monitoring.pgmonitor

PGMonitorSpec defines the desired state of the pgMonitor tool suite

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| exporter | object |  | false |

#### PostgresCluster.spec.monitoring.pgmonitor.exporter

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| configuration | []object | Projected volumes containing custom PostgreSQL Exporter configuration. Currently supports the customization of PostgreSQL Exporter queries. If a "queries.yml" file is detected in any volume projected using this field, it will be loaded using the "extend.query-path" flag: https://github.com/prometheus-community/postgres_exporter#flags Changing the values of this field causes PostgreSQL and the exporter to restart. | false |
| customTLSSecret | object | Projected secret containing custom TLS certificates to encrypt output from the exporter web server | false |
| image | string | The image name to use for crunchy-postgres-exporter containers. The image may also be set using the RELATED_IMAGE_PGEXPORTER environment variable. | false |
| resources | object | Changing this value causes PostgreSQL and the exporter to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers | false |
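
A minimal sketch of an exporter configuration; the image reference and the ConfigMap/Secret names are illustrative, and per the table above, a projected `queries.yml` is picked up via the exporter's "extend.query-path" flag:

```yaml
spec:
  monitoring:
    pgmonitor:
      exporter:
        image: registry.example.com/crunchy-postgres-exporter:latest  # illustrative
        configuration:
        - configMap:
            name: exporter-queries       # illustrative; may contain queries.yml
        customTLSSecret:
          name: exporter-tls             # illustrative Secret with tls.crt/tls.key
        resources:
          requests: { cpu: 100m, memory: 128Mi }
```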

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index]

Projection that may be projected along with other supported volume types

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| configMap | object | configMap information about the configMap data to project | false |
| downwardAPI | object | downwardAPI information about the downwardAPI data to project | false |
| secret | object | secret information about the secret data to project | false |
| serviceAccountToken | object | serviceAccountToken is information about the serviceAccountToken data to project | false |

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].configMap

configMap information about the configMap data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional specify whether the ConfigMap or its keys must be defined | false |

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].configMap.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].downwardAPI

downwardAPI information about the downwardAPI data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | Items is a list of DownwardAPIVolume files | false |

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].downwardAPI.items[index]

DownwardAPIVolumeFile represents information to create the file containing the pod field

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| path | string | Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' | true |
| fieldRef | object | Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. | false |
| mode | integer | Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| resourceFieldRef | object | Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. | false |

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].downwardAPI.items[index].fieldRef

Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| fieldPath | string | Path of the field to select in the specified API version. | true |
| apiVersion | string | Version of the schema the FieldPath is written in terms of, defaults to "v1". | false |

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].downwardAPI.items[index].resourceFieldRef

Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| resource | string | Required: resource to select | true |
| containerName | string | Container name: required for volumes, optional for env vars | false |
| divisor | int or string | Specifies the output format of the exposed resources, defaults to "1" | false |

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].secret

secret information about the secret data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional field specify whether the Secret or its key must be defined | false |

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].secret.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.configuration[index].serviceAccountToken

serviceAccountToken is information about the serviceAccountToken data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| path | string | path is the path relative to the mount point of the file to project the token into. | true |
| audience | string | audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | false |
| expirationSeconds | integer | expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes. | false |
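
The projection fields above mirror the Kubernetes projected-volume API. A sketch mapping a single Secret key to a custom path with restricted permissions (names are illustrative):

```yaml
configuration:
- secret:
    name: exporter-extra           # illustrative Secret
    optional: true
    items:
    - key: queries.yml
      path: queries.yml
      mode: 0440                   # octal in YAML; JSON would require decimal 288
```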

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.customTLSSecret

Projected secret containing custom TLS certificates to encrypt output from the exporter web server

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional field specify whether the Secret or its key must be defined | false |

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.customTLSSecret.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

#### PostgresCluster.spec.monitoring.pgmonitor.exporter.resources

Changing this value causes PostgreSQL and the exporter to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

#### PostgresCluster.spec.patroni

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| dynamicConfiguration | object | Patroni dynamic configuration settings. Changes to this value will be automatically reloaded without validation. Changes to certain PostgreSQL parameters cause PostgreSQL to restart. More info: https://patroni.readthedocs.io/en/latest/SETTINGS.html | false |
| leaderLeaseDurationSeconds | integer | TTL of the cluster leader lock. "Think of it as the length of time before initiation of the automatic failover process." Changing this value causes PostgreSQL to restart. | false |
| port | integer | The port on which Patroni should listen. Changing this value causes PostgreSQL to restart. | false |
| switchover | object | Switchover gives options to perform ad hoc switchovers in a PostgresCluster. | false |
| syncPeriodSeconds | integer | The interval for refreshing the leader lock and applying dynamicConfiguration. Must be less than leaderLeaseDurationSeconds. Changing this value causes PostgreSQL to restart. | false |
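
A minimal sketch of these fields together; the parameter value is illustrative, and note the constraint from the table that `syncPeriodSeconds` must be less than `leaderLeaseDurationSeconds`:

```yaml
spec:
  patroni:
    leaderLeaseDurationSeconds: 30
    syncPeriodSeconds: 10          # must be < leaderLeaseDurationSeconds
    dynamicConfiguration:
      postgresql:
        parameters:
          max_connections: 200     # illustrative; reloaded without validation
```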

#### PostgresCluster.spec.patroni.switchover

Switchover gives options to perform ad hoc switchovers in a PostgresCluster.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| enabled | boolean | Whether or not the operator should allow switchovers in a PostgresCluster | true |
| targetInstance | string | The instance that should become primary during a switchover. This field is optional when Type is "Switchover" and required when Type is "Failover". When it is not specified, a healthy replica is automatically selected. | false |
| type | enum | Type of switchover to perform. Valid options are Switchover and Failover. "Switchover" changes the primary instance of a healthy PostgresCluster. "Failover" forces a particular instance to be primary, regardless of other factors. A TargetInstance must be specified to failover. NOTE: The Failover type is reserved as the "last resort" case. | false |
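
For example, a spec fragment for an ad hoc switchover; the instance set name is an illustrative placeholder, and `targetInstance` is only mandatory for the Failover type:

```yaml
spec:
  patroni:
    switchover:
      enabled: true
      type: Switchover                       # "Failover" would require targetInstance
      targetInstance: hippo-instance1-abcd   # illustrative instance name
```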

#### PostgresCluster.spec.proxy

The specification of a proxy that connects to PostgreSQL.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| pgBouncer | object | Defines a PgBouncer proxy and connection pooler. | true |

#### PostgresCluster.spec.proxy.pgBouncer

Defines a PgBouncer proxy and connection pooler.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| affinity | object | Scheduling constraints of a PgBouncer pod. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| config | object | Configuration settings for the PgBouncer process. Changes to any of these values will be automatically reloaded without validation. Be careful, as you may put PgBouncer into an unusable state. More info: https://www.pgbouncer.org/usage.html#reload | false |
| containers | []object | Custom sidecars for a PgBouncer pod. Changing this value causes PgBouncer to restart. | false |
| customTLSSecret | object | A secret projection containing a certificate and key with which to encrypt connections to PgBouncer. The "tls.crt", "tls.key", and "ca.crt" paths must be PEM-encoded certificates and keys. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths | false |
| image | string | Name of a container image that can run PgBouncer 1.15 or newer. Changing this value causes PgBouncer to restart. The image may also be set using the RELATED_IMAGE_PGBOUNCER environment variable. More info: https://kubernetes.io/docs/concepts/containers/images | false |
| metadata | object | Metadata contains metadata for custom resources | false |
| minAvailable | int or string | Minimum number of pods that should be available at a time. Defaults to one when the replicas field is greater than one. | false |
| port | integer | Port on which PgBouncer should listen for client connections. Changing this value causes PgBouncer to restart. | false |
| priorityClassName | string | Priority class name for the pgBouncer pod. Changing this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ | false |
| replicas | integer | Number of desired PgBouncer pods. | false |
| resources | object | Compute resources of a PgBouncer container. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers | false |
| service | object | Specification of the service that exposes PgBouncer. | false |
| sidecars | object | Configuration for pgBouncer sidecar containers | false |
| tolerations | []object | Tolerations of a PgBouncer pod. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |
| topologySpreadConstraints | []object | Topology spread constraints of a PgBouncer pod. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | false |
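
A minimal sketch of a pooler; the numbers are illustrative, and the `config.global` map is an assumption about how this operator groups pgbouncer.ini settings:

```yaml
spec:
  proxy:
    pgBouncer:
      replicas: 2
      minAvailable: 1
      port: 5432
      config:
        global:
          pool_mode: transaction   # reloaded without validation; see pgbouncer.org
      resources:
        requests: { cpu: 100m, memory: 128Mi }
```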

#### PostgresCluster.spec.proxy.pgBouncer.affinity

Scheduling constraints of a PgBouncer pod. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| nodeAffinity | object | Describes node affinity scheduling rules for the pod. | false |
| podAffinity | object | Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). | false |
| podAntiAffinity | object | Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). | false |

#### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |

#### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

#### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

#### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

#### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

#### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

#### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index]

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

#### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

#### PostgresCluster.spec.proxy.pgBouncer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index]

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
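
A sketch of a required node-affinity rule pinning PgBouncer pods to particular zones; the zone values are illustrative:

```yaml
spec:
  proxy:
    pgBouncer:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: topology.kubernetes.io/zone
                operator: In
                values: [us-east-1a, us-east-1b]   # illustrative zones
```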

- PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity - ↩ Parent -

- - - -Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
- - -

- PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
- - -

- PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm - ↩ Parent -

- - - -Required. A pod affinity term, associated with the corresponding weight. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
- - -

- PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector - ↩ Parent -

- - - -A label query over a set of resources, in this case pods. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
- - -

- PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector - ↩ Parent -

- - - -A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `<topologyKey>` matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
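
Putting these selector fields together, a hard pod-affinity rule for the PgBouncer pods might look like the abridged sketch below. The labels, namespace selector, and topology key are illustrative assumptions, not operator defaults:

```yaml
# Abridged PostgresCluster spec; only the affinity portion is shown.
spec:
  proxy:
    pgBouncer:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: kubernetes.io/hostname
            labelSelector:
              matchExpressions:
              - key: app            # hypothetical label on the target pods
                operator: In
                values: [cache]
            namespaceSelector:
              matchLabels:
                team: db            # hypothetical namespace label
```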

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity

Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `<topologyKey>` matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.proxy.pgBouncer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |
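
For anti-affinity the same terms apply in reverse. A soft rule that spreads PgBouncer replicas across nodes could look like the sketch below; the label selector is an assumption about how the PgBouncer pods are labeled:

```yaml
# Abridged PostgresCluster spec; only the anti-affinity portion is shown.
spec:
  proxy:
    pgBouncer:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100                      # 1-100; node with the highest sum wins
            podAffinityTerm:
              topologyKey: kubernetes.io/hostname
              labelSelector:
                matchLabels:
                  postgres-operator.crunchydata.com/role: pgbouncer  # assumed pod label
```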

### PostgresCluster.spec.proxy.pgBouncer.config

Configuration settings for the PgBouncer process. Changes to any of these values will be automatically reloaded without validation. Be careful, as you may put PgBouncer into an unusable state. More info: https://www.pgbouncer.org/usage.html#reload

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| databases | map[string]string | PgBouncer database definitions. The key is the database requested by a client while the value is a libpq-styled connection string. The special key "*" acts as a fallback. When this field is empty, PgBouncer is configured with a single "*" entry that connects to the primary PostgreSQL instance. More info: https://www.pgbouncer.org/config.html#section-databases | false |
| files | []object | Files to mount under "/etc/pgbouncer". When specified, settings in the "pgbouncer.ini" file are loaded before all others. From there, other files may be included by absolute path. Changing these references causes PgBouncer to restart, but changes to the file contents are automatically reloaded. More info: https://www.pgbouncer.org/config.html#include-directive | false |
| global | map[string]string | Settings that apply to the entire PgBouncer process. More info: https://www.pgbouncer.org/config.html | false |
| users | map[string]string | Connection settings specific to particular users. More info: https://www.pgbouncer.org/config.html#section-users | false |
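
For example, a minimal sketch of these settings; the option names are standard PgBouncer configuration, while the host and values are illustrative:

```yaml
# Abridged PostgresCluster spec; only the config portion is shown.
spec:
  proxy:
    pgBouncer:
      config:
        global:
          pool_mode: transaction            # pgbouncer.ini [pgbouncer] settings
          max_client_conn: "1000"           # map values are strings
        databases:
          "*": host=hippo-primary port=5432 # hypothetical fallback target
        users:
          app: pool_mode=session            # per-user override
```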

### PostgresCluster.spec.proxy.pgBouncer.config.files[index]

Projection that may be projected along with other supported volume types.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| configMap | object | configMap information about the configMap data to project | false |
| downwardAPI | object | downwardAPI information about the downwardAPI data to project | false |
| secret | object | secret information about the secret data to project | false |
| serviceAccountToken | object | serviceAccountToken is information about the serviceAccountToken data to project | false |
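
These entries are standard Kubernetes volume projections. A sketch that layers an extra file onto /etc/pgbouncer; the ConfigMap name and key are hypothetical:

```yaml
spec:
  proxy:
    pgBouncer:
      config:
        files:
        - configMap:
            name: pgbouncer-extras    # hypothetical ConfigMap
            items:
            - key: extra.ini
              path: extra.ini         # becomes /etc/pgbouncer/extra.ini
```

Per the description above, pgbouncer.ini is loaded first and may pull in /etc/pgbouncer/extra.ini through an include directive; editing the file contents reloads PgBouncer, while changing the projection itself restarts it.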

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].configMap

configMap information about the configMap data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional specify whether the ConfigMap or its keys must be defined | false |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].configMap.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].downwardAPI

downwardAPI information about the downwardAPI data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | Items is a list of DownwardAPIVolume files | false |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].downwardAPI.items[index]

DownwardAPIVolumeFile represents information to create the file containing the pod field.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| path | string | Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' | true |
| fieldRef | object | Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. | false |
| mode | integer | Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |
| resourceFieldRef | object | Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. | false |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].downwardAPI.items[index].fieldRef

Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| fieldPath | string | Path of the field to select in the specified API version. | true |
| apiVersion | string | Version of the schema the FieldPath is written in terms of, defaults to "v1". | false |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].downwardAPI.items[index].resourceFieldRef

Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| resource | string | Required: resource to select | true |
| containerName | string | Container name: required for volumes, optional for env vars | false |
| divisor | int or string | Specifies the output format of the exposed resources, defaults to "1" | false |
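
Continuing the files list above, a downwardAPI projection combining both reference types might look like this sketch; the file paths are arbitrary, and the container name is an assumption about this pod's PgBouncer container:

```yaml
- downwardAPI:
    items:
    - path: pod-labels
      fieldRef:
        fieldPath: metadata.labels
    - path: cpu-limit
      resourceFieldRef:
        containerName: pgbouncer   # assumed container name
        resource: limits.cpu
        divisor: 1m                # report the limit in millicores
```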

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].secret

secret information about the secret data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional field specify whether the Secret or its key must be defined | false |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].secret.items[index]

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PostgresCluster.spec.proxy.pgBouncer.config.files[index].serviceAccountToken

serviceAccountToken is information about the serviceAccountToken data to project

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| path | string | path is the path relative to the mount point of the file to project the token into. | true |
| audience | string | audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | false |
| expirationSeconds | integer | expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes. | false |
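
And, within the same files list, a serviceAccountToken projection, should a file under /etc/pgbouncer need a bound token; the audience is a placeholder:

```yaml
- serviceAccountToken:
    path: token
    audience: my-audience     # placeholder; defaults to the apiserver's identifier
    expirationSeconds: 3600   # must be at least 600 (10 minutes)
```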

### PostgresCluster.spec.proxy.pgBouncer.containers[index]

A single application container that you want to run within a pod.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. | true |
| args | []string | Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell | false |
| command | []string | Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell | false |
| env | []object | List of environment variables to set in the container. Cannot be updated. | false |
| envFrom | []object | List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. | false |
| image | string | Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. | false |
| imagePullPolicy | string | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images | false |
| lifecycle | object | Actions that the management system should take in response to container lifecycle events. Cannot be updated. | false |
| livenessProbe | object | Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |
| ports | []object | List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. | false |
| readinessProbe | object | Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |
| resources | object | Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| securityContext | object | SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | false |
| startupProbe | object | StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |
| stdin | boolean | Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. | false |
| stdinOnce | boolean | Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false. | false |
| terminationMessagePath | string | Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. | false |
| terminationMessagePolicy | string | Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. | false |
| tty | boolean | Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. | false |
| volumeDevices | []object | volumeDevices is the list of block devices to be used by the container. | false |
| volumeMounts | []object | Pod volumes to mount into the container's filesystem. Cannot be updated. | false |
| workingDir | string | Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. | false |
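
These are ordinary Kubernetes container fields. A minimal sidecar sketch; the name, image, and command are hypothetical:

```yaml
# Abridged PostgresCluster spec; only the sidecar portion is shown.
spec:
  proxy:
    pgBouncer:
      containers:
      - name: debug-tools                       # hypothetical sidecar
        image: registry.example.com/busybox:1.36
        command: ["sh", "-c", "while true; do sleep 30; done"]
        resources:
          limits:
            cpu: 100m
            memory: 64Mi
```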

### PostgresCluster.spec.proxy.pgBouncer.containers[index].env[index]

EnvVar represents an environment variable present in a Container.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | Name of the environment variable. Must be a C_IDENTIFIER. | true |
| value | string | Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". | false |
| valueFrom | object | Source for the environment variable's value. Cannot be used if value is not empty. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].env[index].valueFrom

Source for the environment variable's value. Cannot be used if value is not empty.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| configMapKeyRef | object | Selects a key of a ConfigMap. | false |
| fieldRef | object | Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. | false |
| resourceFieldRef | object | Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. | false |
| secretKeyRef | object | Selects a key of a secret in the pod's namespace | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].env[index].valueFrom.configMapKeyRef

Selects a key of a ConfigMap.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | The key to select. | true |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the ConfigMap or its key must be defined | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].env[index].valueFrom.fieldRef

Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| fieldPath | string | Path of the field to select in the specified API version. | true |
| apiVersion | string | Version of the schema the FieldPath is written in terms of, defaults to "v1". | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].env[index].valueFrom.resourceFieldRef

Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| resource | string | Required: resource to select | true |
| containerName | string | Container name: required for volumes, optional for env vars | false |
| divisor | int or string | Specifies the output format of the exposed resources, defaults to "1" | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].env[index].valueFrom.secretKeyRef

Selects a key of a secret in the pod's namespace.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | The key of the secret to select from. Must be a valid secret key. | true |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the Secret or its key must be defined | false |
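
Combining the value sources above, an env sketch for such a sidecar; the variable names and the Secret are hypothetical:

```yaml
env:
- name: LOG_LEVEL
  value: info
- name: POD_NAMESPACE
  valueFrom:
    fieldRef:
      fieldPath: metadata.namespace
- name: API_TOKEN
  valueFrom:
    secretKeyRef:
      name: sidecar-credentials   # hypothetical Secret
      key: token
      optional: true
```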

### PostgresCluster.spec.proxy.pgBouncer.containers[index].envFrom[index]

EnvFromSource represents the source of a set of ConfigMaps.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| configMapRef | object | The ConfigMap to select from | false |
| prefix | string | An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. | false |
| secretRef | object | The Secret to select from | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].envFrom[index].configMapRef

The ConfigMap to select from.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the ConfigMap must be defined | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].envFrom[index].secretRef

The Secret to select from.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | Specify whether the Secret must be defined | false |
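
A corresponding envFrom sketch; the referenced names are hypothetical. Note how prefix namespaces the imported keys:

```yaml
envFrom:
- prefix: SIDECAR_            # must be a C_IDENTIFIER
  configMapRef:
    name: sidecar-config      # hypothetical ConfigMap
- secretRef:
    name: sidecar-secrets     # hypothetical Secret
    optional: true
```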

### PostgresCluster.spec.proxy.pgBouncer.containers[index].lifecycle

Actions that the management system should take in response to container lifecycle events. Cannot be updated.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| postStart | object | PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks | false |
| preStop | object | PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].lifecycle.postStart

PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| exec | object | Exec specifies the action to take. | false |
| httpGet | object | HTTPGet specifies the http request to perform. | false |
| tcpSocket | object | Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept for backward compatibility. There is no validation of this field, and lifecycle hooks will fail at runtime when a tcp handler is specified. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].lifecycle.postStart.exec

Exec specifies the action to take.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| command | []string | Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('\|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].lifecycle.postStart.httpGet

HTTPGet specifies the http request to perform.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| port | int or string | Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
| host | string | Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. | false |
| httpHeaders | []object | Custom headers to set in the request. HTTP allows repeated headers. | false |
| path | string | Path to access on the HTTP server. | false |
| scheme | string | Scheme to use for connecting to the host. Defaults to HTTP. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].lifecycle.postStart.httpGet.httpHeaders[index]

HTTPHeader describes a custom header to be used in HTTP probes.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | The header field name | true |
| value | string | The header field value | true |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].lifecycle.postStart.tcpSocket

Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept for backward compatibility. There is no validation of this field, and lifecycle hooks will fail at runtime when a tcp handler is specified.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| port | int or string | Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
| host | string | Optional: Host name to connect to, defaults to the pod IP. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].lifecycle.preStop

PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| exec | object | Exec specifies the action to take. | false |
| httpGet | object | HTTPGet specifies the http request to perform. | false |
| tcpSocket | object | Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept for backward compatibility. There is no validation of this field, and lifecycle hooks will fail at runtime when a tcp handler is specified. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].lifecycle.preStop.exec

Exec specifies the action to take.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| command | []string | Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('\|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].lifecycle.preStop.httpGet

HTTPGet specifies the http request to perform.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| port | int or string | Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
| host | string | Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. | false |
| httpHeaders | []object | Custom headers to set in the request. HTTP allows repeated headers. | false |
| path | string | Path to access on the HTTP server. | false |
| scheme | string | Scheme to use for connecting to the host. Defaults to HTTP. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].lifecycle.preStop.httpGet.httpHeaders[index]

HTTPHeader describes a custom header to be used in HTTP probes.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | The header field name | true |
| value | string | The header field value | true |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].lifecycle.preStop.tcpSocket

Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept for backward compatibility. There is no validation of this field, and lifecycle hooks will fail at runtime when a tcp handler is specified.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| port | int or string | Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
| host | string | Optional: Host name to connect to, defaults to the pod IP. | false |
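
A lifecycle sketch using both hooks; the file, path, and port are placeholders. Note the exec command calls a shell explicitly, since handlers are not run in one:

```yaml
lifecycle:
  postStart:
    exec:
      command: ["/bin/sh", "-c", "echo started > /tmp/lifecycle"]
  preStop:
    httpGet:
      path: /drain        # placeholder endpoint on the sidecar
      port: 8080
      scheme: HTTP
```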

### PostgresCluster.spec.proxy.pgBouncer.containers[index].livenessProbe

Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| exec | object | Exec specifies the action to take. | false |
| failureThreshold | integer | Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. | false |
| grpc | object | GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. | false |
| httpGet | object | HTTPGet specifies the http request to perform. | false |
| initialDelaySeconds | integer | Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |
| periodSeconds | integer | How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. | false |
| successThreshold | integer | Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. | false |
| tcpSocket | object | TCPSocket specifies an action involving a TCP port. | false |
| terminationGracePeriodSeconds | integer | Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. | false |
| timeoutSeconds | integer | Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].livenessProbe.exec

Exec specifies the action to take.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| command | []string | Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('\|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].livenessProbe.grpc

GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| port | integer | Port number of the gRPC service. Number must be in the range 1 to 65535. | true |
| service | string | Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].livenessProbe.httpGet

HTTPGet specifies the http request to perform.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| port | int or string | Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
| host | string | Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. | false |
| httpHeaders | []object | Custom headers to set in the request. HTTP allows repeated headers. | false |
| path | string | Path to access on the HTTP server. | false |
| scheme | string | Scheme to use for connecting to the host. Defaults to HTTP. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].livenessProbe.httpGet.httpHeaders[index]

HTTPHeader describes a custom header to be used in HTTP probes.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | The header field name | true |
| value | string | The header field value | true |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].livenessProbe.tcpSocket

TCPSocket specifies an action involving a TCP port.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| port | int or string | Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
| host | string | Optional: Host name to connect to, defaults to the pod IP. | false |
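
A liveness sketch under the assumption that the sidecar listens on TCP port 8080:

```yaml
livenessProbe:
  tcpSocket:
    port: 8080            # assumed listening port
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 1
  failureThreshold: 3
```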

### PostgresCluster.spec.proxy.pgBouncer.containers[index].ports[index]

ContainerPort represents a network port in a single container.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| containerPort | integer | Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. | true |
| hostIP | string | What host IP to bind the external port to. | false |
| hostPort | integer | Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. | false |
| name | string | If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. | false |
| protocol | string | Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". | false |
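
For example (the name must be an IANA_SVC_NAME: lowercase, at most 15 characters):

```yaml
ports:
- name: metrics          # hypothetical port name
  containerPort: 9127    # hypothetical port number
  protocol: TCP
```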

### PostgresCluster.spec.proxy.pgBouncer.containers[index].readinessProbe

Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| exec | object | Exec specifies the action to take. | false |
| failureThreshold | integer | Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. | false |
| grpc | object | GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. | false |
| httpGet | object | HTTPGet specifies the http request to perform. | false |
| initialDelaySeconds | integer | Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |
| periodSeconds | integer | How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. | false |
| successThreshold | integer | Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. | false |
| tcpSocket | object | TCPSocket specifies an action involving a TCP port. | false |
| terminationGracePeriodSeconds | integer | Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. | false |
| timeoutSeconds | integer | Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].readinessProbe.exec

Exec specifies the action to take.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| command | []string | Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('\|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].readinessProbe.grpc

GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| port | integer | Port number of the gRPC service. Number must be in the range 1 to 65535. | true |
| service | string | Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].readinessProbe.httpGet

HTTPGet specifies the http request to perform.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| port | int or string | Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
| host | string | Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. | false |
| httpHeaders | []object | Custom headers to set in the request. HTTP allows repeated headers. | false |
| path | string | Path to access on the HTTP server. | false |
| scheme | string | Scheme to use for connecting to the host. Defaults to HTTP. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].readinessProbe.httpGet.httpHeaders[index]

HTTPHeader describes a custom header to be used in HTTP probes.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | The header field name | true |
| value | string | The header field value | true |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].readinessProbe.tcpSocket

TCPSocket specifies an action involving a TCP port.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| port | int or string | Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
| host | string | Optional: Host name to connect to, defaults to the pod IP. | false |
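
A readiness sketch that probes the named port declared earlier; the path and header are placeholders:

```yaml
readinessProbe:
  httpGet:
    path: /ready         # placeholder endpoint
    port: metrics        # references the named containerPort above
    httpHeaders:
    - name: X-Probe      # placeholder header
      value: readiness
  periodSeconds: 5
```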

### PostgresCluster.spec.proxy.pgBouncer.containers[index].resources

Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
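
For instance (the values are illustrative, not recommendations):

```yaml
resources:
  requests:
    cpu: 50m
    memory: 32Mi
  limits:
    cpu: 200m
    memory: 128Mi
```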

### PostgresCluster.spec.proxy.pgBouncer.containers[index].securityContext ↩ Parent

SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| allowPrivilegeEscalation | boolean | AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. | false |
| capabilities | object | The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. | false |
| privileged | boolean | Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. | false |
| procMount | string | procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. | false |
| readOnlyRootFilesystem | boolean | Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. | false |
| runAsGroup | integer | The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. | false |
| runAsNonRoot | boolean | Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. | false |
| runAsUser | integer | The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. | false |
| seLinuxOptions | object | The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. | false |
| seccompProfile | object | The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. | false |
| windowsOptions | object | The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. | false |
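
A minimal sketch of a restrictive security context for a custom sidecar container, assuming a hypothetical container name and image:

```yaml
spec:
  proxy:
    pgBouncer:
      containers:
        - name: my-sidecar                       # hypothetical sidecar container
          image: example.com/my-sidecar:latest   # placeholder image
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            capabilities:
              drop: ["ALL"]    # drop all Linux capabilities
```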

### PostgresCluster.spec.proxy.pgBouncer.containers[index].securityContext.capabilities ↩ Parent

The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| add | []string | Added capabilities | false |
| drop | []string | Removed capabilities | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].securityContext.seLinuxOptions ↩ Parent

The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| level | string | Level is SELinux level label that applies to the container. | false |
| role | string | Role is a SELinux role label that applies to the container. | false |
| type | string | Type is a SELinux type label that applies to the container. | false |
| user | string | User is a SELinux user label that applies to the container. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].securityContext.seccompProfile ↩ Parent

The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| type | string | type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. | true |
| localhostProfile | string | localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].securityContext.windowsOptions ↩ Parent

The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| gmsaCredentialSpec | string | GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. | false |
| gmsaCredentialSpecName | string | GMSACredentialSpecName is the name of the GMSA credential spec to use. | false |
| hostProcess | boolean | HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. | false |
| runAsUserName | string | The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].startupProbe ↩ Parent

StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| exec | object | Exec specifies the action to take. | false |
| failureThreshold | integer | Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. | false |
| grpc | object | GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. | false |
| httpGet | object | HTTPGet specifies the http request to perform. | false |
| initialDelaySeconds | integer | Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |
| periodSeconds | integer | How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. | false |
| successThreshold | integer | Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. | false |
| tcpSocket | object | TCPSocket specifies an action involving a TCP port. | false |
| terminationGracePeriodSeconds | integer | Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. | false |
| timeoutSeconds | integer | Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | false |
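
For example, a slow-starting sidecar could combine `periodSeconds` and `failureThreshold` to allow a long startup window. This is a sketch with placeholder names and values:

```yaml
spec:
  proxy:
    pgBouncer:
      containers:
        - name: my-sidecar                       # hypothetical sidecar container
          image: example.com/my-sidecar:latest   # placeholder image
          startupProbe:
            tcpSocket:
              port: 8080          # assumed container port
            periodSeconds: 10     # probe every 10 seconds
            failureThreshold: 30  # tolerate up to ~5 minutes of startup
```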

### PostgresCluster.spec.proxy.pgBouncer.containers[index].startupProbe.exec ↩ Parent

Exec specifies the action to take.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| command | []string | Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('\|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].startupProbe.grpc ↩ Parent

GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| port | integer | Port number of the gRPC service. Number must be in the range 1 to 65535. | true |
| service | string | Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].startupProbe.httpGet ↩ Parent

HTTPGet specifies the http request to perform.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| port | int or string | Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
| host | string | Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. | false |
| httpHeaders | []object | Custom headers to set in the request. HTTP allows repeated headers. | false |
| path | string | Path to access on the HTTP server. | false |
| scheme | string | Scheme to use for connecting to the host. Defaults to HTTP. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].startupProbe.httpGet.httpHeaders[index] ↩ Parent

HTTPHeader describes a custom header to be used in HTTP probes.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| name | string | The header field name | true |
| value | string | The header field value | true |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].startupProbe.tcpSocket ↩ Parent

TCPSocket specifies an action involving a TCP port.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| port | int or string | Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. | true |
| host | string | Optional: Host name to connect to, defaults to the pod IP. | false |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].volumeDevices[index] ↩ Parent

volumeDevice describes a mapping of a raw block device within a container.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| devicePath | string | devicePath is the path inside of the container that the device will be mapped to. | true |
| name | string | name must match the name of a persistentVolumeClaim in the pod | true |

### PostgresCluster.spec.proxy.pgBouncer.containers[index].volumeMounts[index] ↩ Parent

VolumeMount describes a mounting of a Volume within a container.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| mountPath | string | Path within the container at which the volume should be mounted. Must not contain ':'. | true |
| name | string | This must match the Name of a Volume. | true |
| mountPropagation | string | mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. | false |
| readOnly | boolean | Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. | false |
| subPath | string | Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). | false |
| subPathExpr | string | Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. | false |
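
As a sketch, a read-only mount on a custom sidecar container could look like the following; the volume and container names are placeholder assumptions that must match names defined elsewhere in the pod:

```yaml
spec:
  proxy:
    pgBouncer:
      containers:
        - name: my-sidecar                       # hypothetical sidecar container
          image: example.com/my-sidecar:latest   # placeholder image
          volumeMounts:
            - name: my-volume                    # must match a Volume name in the pod
              mountPath: /var/lib/my-sidecar     # assumed path inside the container
              readOnly: true
```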

### PostgresCluster.spec.proxy.pgBouncer.customTLSSecret ↩ Parent

A secret projection containing a certificate and key with which to encrypt connections to PgBouncer. The "tls.crt", "tls.key", and "ca.crt" paths must be PEM-encoded certificates and keys. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| items | []object | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | false |
| name | string | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | false |
| optional | boolean | optional field specify whether the Secret or its key must be defined | false |
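
For example, a Secret whose keys already match the expected paths can be referenced directly; the Secret name here is a placeholder, while the key/path names follow the "tls.crt", "tls.key", and "ca.crt" convention described above:

```yaml
spec:
  proxy:
    pgBouncer:
      customTLSSecret:
        name: pgbouncer-tls      # hypothetical Secret in the cluster's namespace
        items:
          - { key: tls.crt, path: tls.crt }   # PEM certificate
          - { key: tls.key, path: tls.key }   # PEM private key
          - { key: ca.crt,  path: ca.crt }    # PEM certificate authority
```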

### PostgresCluster.spec.proxy.pgBouncer.customTLSSecret.items[index] ↩ Parent

Maps a string key to a path within a volume.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the key to project. | true |
| path | string | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | true |
| mode | integer | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | false |

### PostgresCluster.spec.proxy.pgBouncer.metadata ↩ Parent

Metadata contains metadata for custom resources.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| annotations | map[string]string |  | false |
| labels | map[string]string |  | false |

### PostgresCluster.spec.proxy.pgBouncer.resources ↩ Parent

Compute resources of a PgBouncer container. Changing this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### PostgresCluster.spec.proxy.pgBouncer.service ↩ Parent

Specification of the service that exposes PgBouncer.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| metadata | object | Metadata contains metadata for custom resources | false |
| nodePort | integer | The port on which this service is exposed when type is NodePort or LoadBalancer. Value must be in-range and not in use or the operation will fail. If unspecified, a port will be allocated if this Service requires one. https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport | false |
| type | enum | More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types | false |
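
For instance, to expose PgBouncer on a fixed node port (the port number below is illustrative and must fall within the cluster's NodePort range and be unused):

```yaml
spec:
  proxy:
    pgBouncer:
      service:
        type: NodePort
        nodePort: 32001    # placeholder; must be in-range and not in use
```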

### PostgresCluster.spec.proxy.pgBouncer.service.metadata ↩ Parent

Metadata contains metadata for custom resources.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| annotations | map[string]string |  | false |
| labels | map[string]string |  | false |

### PostgresCluster.spec.proxy.pgBouncer.sidecars ↩ Parent

Configuration for pgBouncer sidecar containers.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| pgbouncerConfig | object | Defines the configuration for the pgBouncer config sidecar container | false |

### PostgresCluster.spec.proxy.pgBouncer.sidecars.pgbouncerConfig ↩ Parent

Defines the configuration for the pgBouncer config sidecar container.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| resources | object | Resource requirements for a sidecar container | false |

### PostgresCluster.spec.proxy.pgBouncer.sidecars.pgbouncerConfig.resources ↩ Parent

Resource requirements for a sidecar container.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### PostgresCluster.spec.proxy.pgBouncer.tolerations[index] ↩ Parent

The pod this Toleration is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| effect | string | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | false |
| key | string | Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. | false |
| operator | string | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. | false |
| tolerationSeconds | integer | TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. | false |
| value | string | Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. | false |
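
As a sketch, scheduling PgBouncer pods onto tainted nodes could look like this; the taint key and value are hypothetical and must match taints applied to your nodes:

```yaml
spec:
  proxy:
    pgBouncer:
      tolerations:
        - key: example.com/dedicated   # hypothetical taint key
          operator: Equal
          value: pgbouncer             # hypothetical taint value
          effect: NoSchedule
```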

### PostgresCluster.spec.proxy.pgBouncer.topologySpreadConstraints[index] ↩ Parent

TopologySpreadConstraint specifies how to spread matching pods among the given topology.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| maxSkew | integer | MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. \| zone1 \| zone2 \| zone3 \| \| P P \| P P \| P \| - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. | true |
| topologyKey | string | TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. | true |
| whenUnsatisfiable | string | WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: \| zone1 \| zone2 \| zone3 \| \| P P P \| P \| P \| If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. | true |
| labelSelector | object | LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. | false |
| minDomains | integer | MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: \| zone1 \| zone2 \| zone3 \| \| P P \| P P \| P P \| The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate. | false |
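
As a sketch only: spreading PgBouncer pods across zones might look like the following. The label used in the selector is an assumption about how PgBouncer pods are labeled; verify the actual labels on your pods before relying on it.

```yaml
spec:
  proxy:
    pgBouncer:
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: ScheduleAnyway
          labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/role: pgbouncer  # assumed pod label
```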

### PostgresCluster.spec.proxy.pgBouncer.topologySpreadConstraints[index].labelSelector ↩ Parent

LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.proxy.pgBouncer.topologySpreadConstraints[index].labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.service ↩ Parent

Specification of the service that exposes the PostgreSQL primary instance.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| metadata | object | Metadata contains metadata for custom resources | false |
| nodePort | integer | The port on which this service is exposed when type is NodePort or LoadBalancer. Value must be in-range and not in use or the operation will fail. If unspecified, a port will be allocated if this Service requires one. https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport | false |
| type | enum | More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types | false |

### PostgresCluster.spec.service.metadata ↩ Parent

Metadata contains metadata for custom resources.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| annotations | map[string]string |  | false |
| labels | map[string]string |  | false |

### PostgresCluster.spec.standby ↩ Parent

Run this cluster as a read-only copy of an existing cluster or archive.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| enabled | boolean | Whether or not the PostgreSQL cluster should be read-only. When this is true, WAL files are applied from a pgBackRest repository or another PostgreSQL server. | false |
| host | string | Network address of the PostgreSQL server to follow via streaming replication. | false |
| port | integer | Network port of the PostgreSQL server to follow via streaming replication. | false |
| repoName | string | The name of the pgBackRest repository to follow for WAL files. | false |
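
For example, a standby cluster that replays WAL from a pgBackRest repository sets `enabled` and `repoName`; a cluster following another server via streaming replication would instead set `host` (and optionally `port`). The repository name below is illustrative:

```yaml
spec:
  standby:
    enabled: true
    repoName: repo1   # pgBackRest repository to follow for WAL files
```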

### PostgresCluster.spec.userInterface ↩ Parent

The specification of a user interface that connects to PostgreSQL.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| pgAdmin | object | Defines a pgAdmin user interface. | true |

### PostgresCluster.spec.userInterface.pgAdmin ↩ Parent

Defines a pgAdmin user interface.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| dataVolumeClaimSpec | object | Defines a PersistentVolumeClaim for pgAdmin data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes | true |
| affinity | object | Scheduling constraints of a pgAdmin pod. Changing this value causes pgAdmin to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | false |
| config | object | Configuration settings for the pgAdmin process. Changes to any of these values will be loaded without validation. Be careful, as you may put pgAdmin into an unusable state. | false |
| image | string | Name of a container image that can run pgAdmin 4. Changing this value causes pgAdmin to restart. The image may also be set using the RELATED_IMAGE_PGADMIN environment variable. More info: https://kubernetes.io/docs/concepts/containers/images | false |
| metadata | object | Metadata contains metadata for custom resources | false |
| priorityClassName | string | Priority class name for the pgAdmin pod. Changing this value causes pgAdmin to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ | false |
| replicas | integer | Number of desired pgAdmin pods. | false |
| resources | object | Compute resources of a pgAdmin container. Changing this value causes pgAdmin to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers | false |
| service | object | Specification of the service that exposes pgAdmin. | false |
| tolerations | []object | Tolerations of a pgAdmin pod. Changing this value causes pgAdmin to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | false |
| topologySpreadConstraints | []object | Topology spread constraints of a pgAdmin pod. Changing this value causes pgAdmin to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | false |

### PostgresCluster.spec.userInterface.pgAdmin.dataVolumeClaimSpec ↩ Parent

Defines a PersistentVolumeClaim for pgAdmin data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| accessModes | []string | accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | false |
| dataSource | object | dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. | false |
| dataSourceRef | object | dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. | false |
| resources | object | resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources | false |
| selector | object | selector is a label query over volumes to consider for binding. | false |
| storageClassName | string | storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 | false |
| volumeMode | string | volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. | false |
| volumeName | string | volumeName is the binding reference to the PersistentVolume backing this claim. | false |
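
A minimal sketch of a pgAdmin data volume claim; the storage size is a placeholder to adjust for your environment:

```yaml
spec:
  userInterface:
    pgAdmin:
      dataVolumeClaimSpec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi   # placeholder size for pgAdmin data
```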

### PostgresCluster.spec.userInterface.pgAdmin.dataVolumeClaimSpec.dataSource ↩ Parent

dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |

### PostgresCluster.spec.userInterface.pgAdmin.dataVolumeClaimSpec.dataSourceRef ↩ Parent

dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| kind | string | Kind is the type of resource being referenced | true |
| name | string | Name is the name of resource being referenced | true |
| apiGroup | string | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | false |

### PostgresCluster.spec.userInterface.pgAdmin.dataVolumeClaimSpec.resources ↩ Parent

resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### PostgresCluster.spec.userInterface.pgAdmin.dataVolumeClaimSpec.selector ↩ Parent

selector is a label query over volumes to consider for binding.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.userInterface.pgAdmin.dataVolumeClaimSpec.selector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity ↩ Parent

Scheduling constraints of a pgAdmin pod. Changing this value causes pgAdmin to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| nodeAffinity | object | Describes node affinity scheduling rules for the pod. | false |
| podAffinity | object | Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). | false |
| podAntiAffinity | object | Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.nodeAffinity ↩ Parent

Describes node affinity scheduling rules for the pod.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preference | object | A node selector term, associated with the corresponding weight. | true |
| weight | integer | Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. | true |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference ↩ Parent

A node selector term, associated with the corresponding weight.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution ↩ Parent

If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| nodeSelectorTerms | []object | Required. A list of node selector terms. The terms are ORed. | true |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] ↩ Parent

A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | A list of node selector requirements by node's labels. | false |
| matchFields | []object | A list of node selector requirements by node's fields. | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] ↩ Parent

A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | The label key that the selector applies to. | true |
| operator | string | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. | true |
| values | []string | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | false |
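
Tying the nodeAffinity fields together, a sketch that pins pgAdmin to nodes of a particular architecture might look like this; the label key and value are standard Kubernetes node labels, but whether you constrain on them is your choice:

```yaml
spec:
  userInterface:
    pgAdmin:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/arch   # well-known node label
                    operator: In
                    values: ["amd64"]         # illustrative constraint
```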

### PostgresCluster.spec.userInterface.pgAdmin.affinity.podAffinity ↩ Parent

Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| preferredDuringSchedulingIgnoredDuringExecution | []object | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. | false |
| requiredDuringSchedulingIgnoredDuringExecution | []object | If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| podAffinityTerm | object | Required. A pod affinity term, associated with the corresponding weight. | true |
| weight | integer | weight associated with matching the corresponding podAffinityTerm, in the range 1-100. | true |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm ↩ Parent

Required. A pod affinity term, associated with the corresponding weight.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector ↩ Parent

A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] ↩ Parent

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] ↩ Parent

Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key `<topologyKey>` matches that of any node on which a pod of the set of pods is running.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| topologyKey | string | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | true |
| labelSelector | object | A label query over a set of resources, in this case pods. | false |
| namespaceSelector | object | A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. | false |
| namespaces | []string | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". | false |

### PostgresCluster.spec.userInterface.pgAdmin.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector ↩ Parent

A label query over a set of resources, in this case pods.

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector - ↩ Parent -

- - - -A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
- - -
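
Taken together, the podAffinity fields above compose as in this minimal sketch; the `app=frontend` label and the choice of `topologyKey` are illustrative assumptions, not defaults from the API.

```yaml
# Hypothetical PostgresCluster excerpt: require pgAdmin to land on a node
# that already runs a pod labeled app=frontend. The label and topologyKey
# are assumptions for illustration only.
spec:
  userInterface:
    pgAdmin:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: kubernetes.io/hostname
            labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values: [frontend]
```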

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAntiAffinity - ↩ Parent -

- - - -Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
preferredDuringSchedulingIgnoredDuringExecution[]objectThe scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.false
requiredDuringSchedulingIgnoredDuringExecution[]objectIf the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
podAffinityTermobjectRequired. A pod affinity term, associated with the corresponding weight.true
weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.true
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm - ↩ Parent -

- - - -Required. A pod affinity term, associated with the corresponding weight. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector - ↩ Parent -

- - - -A label query over a set of resources, in this case pods. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector - ↩ Parent -

- - - -A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] - ↩ Parent -

- - - -Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.true
labelSelectorobjectA label query over a set of resources, in this case pods.false
namespaceSelectorobjectA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.false
namespaces[]stringnamespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector - ↩ Parent -

- - - -A label query over a set of resources, in this case pods. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector - ↩ Parent -

- - - -A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
- - -
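
A hedged sketch of the preferred (weighted) form of the anti-affinity fields above; the role label is an assumption about how pgAdmin pods might be labeled, not a documented default.

```yaml
# Hypothetical excerpt: prefer, with the maximum weight of 100, that
# pgAdmin pods spread across zones away from pods carrying an assumed
# role label.
spec:
  userInterface:
    pgAdmin:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              topologyKey: topology.kubernetes.io/zone
              labelSelector:
                matchLabels:
                  postgres-operator.crunchydata.com/role: pgadmin  # assumed label
```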

- PostgresCluster.spec.userInterface.pgAdmin.config - ↩ Parent -

- - - -Configuration settings for the pgAdmin process. Changes to any of these values will be loaded without validation. Be careful, as you may put pgAdmin into an unusable state. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
files[]objectFiles allows the user to mount projected volumes into the pgAdmin container so that files can be referenced by pgAdmin as needed.false
ldapBindPasswordobjectA Secret containing the value for the LDAP_BIND_PASSWORD setting. More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.htmlfalse
settingsobjectSettings for the pgAdmin server process. Keys should be uppercase and values must be constants. More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.htmlfalse
- - -
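
To ground these three fields, a minimal sketch of a pgAdmin config block; the Secret name and key under `ldapBindPassword` are assumptions for illustration.

```yaml
# Hypothetical excerpt: settings keys are uppercase with constant values,
# and the LDAP bind password is read from an assumed Secret.
spec:
  userInterface:
    pgAdmin:
      config:
        settings:
          SHOW_GRAVATAR_IMAGE: False
        ldapBindPassword:
          name: pgadmin-ldap        # assumed Secret name
          key: bind-password        # assumed key in that Secret
```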

- PostgresCluster.spec.userInterface.pgAdmin.config.files[index] - ↩ Parent -

- - - -Projection that may be projected along with other supported volume types - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
configMapobjectconfigMap information about the configMap data to projectfalse
downwardAPIobjectdownwardAPI information about the downwardAPI data to projectfalse
secretobjectsecret information about the secret data to projectfalse
serviceAccountTokenobjectserviceAccountToken is information about the serviceAccountToken data to projectfalse
- - -

- PostgresCluster.spec.userInterface.pgAdmin.config.files[index].configMap - ↩ Parent -

- - - -configMap information about the configMap data to project - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#namesfalse
optionalbooleanoptional specify whether the ConfigMap or its keys must be definedfalse
- - -

- PostgresCluster.spec.userInterface.pgAdmin.config.files[index].configMap.items[index] - ↩ Parent -

- - - -Maps a string key to a path within a volume. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.config.files[index].downwardAPI - ↩ Parent -

- - - -downwardAPI information about the downwardAPI data to project - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
items[]objectItems is a list of DownwardAPIVolume filefalse
- - -

- PostgresCluster.spec.userInterface.pgAdmin.config.files[index].downwardAPI.items[index] - ↩ Parent -

- - - -DownwardAPIVolumeFile represents information to create the file containing the pod field - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
pathstringRequired: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'true
fieldRefobjectRequired: Selects a field of the pod: only annotations, labels, name and namespace are supported.false
modeintegerOptional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
resourceFieldRefobjectSelects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.config.files[index].downwardAPI.items[index].fieldRef - ↩ Parent -

- - - -Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
fieldPathstringPath of the field to select in the specified API version.true
apiVersionstringVersion of the schema the FieldPath is written in terms of, defaults to "v1".false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.config.files[index].downwardAPI.items[index].resourceFieldRef - ↩ Parent -

- - - -Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
resourcestringRequired: resource to selecttrue
containerNamestringContainer name: required for volumes, optional for env varsfalse
divisorint or stringSpecifies the output format of the exposed resources, defaults to "1"false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.config.files[index].secret - ↩ Parent -

- - - -secret information about the secret data to project - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
items[]objectitems if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.false
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#namesfalse
optionalbooleanoptional field specify whether the Secret or its key must be definedfalse
- - -

- PostgresCluster.spec.userInterface.pgAdmin.config.files[index].secret.items[index] - ↩ Parent -

- - - -Maps a string key to a path within a volume. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringkey is the key to project.true
pathstringpath is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.true
modeintegermode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.config.files[index].serviceAccountToken - ↩ Parent -

- - - -serviceAccountToken is information about the serviceAccountToken data to project - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
pathstringpath is the path relative to the mount point of the file to project the token into.true
audiencestringaudience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.false
expirationSecondsintegerexpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.false
- - -
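
Putting the projection types above together, a hedged sketch of mounting files into the pgAdmin container; the ConfigMap and Secret names, keys, and paths are assumptions.

```yaml
# Hypothetical excerpt: project one ConfigMap key and an optional Secret
# into the pgAdmin container as files.
spec:
  userInterface:
    pgAdmin:
      config:
        files:
        - configMap:
            name: pgadmin-extra           # assumed ConfigMap name
            items:
            - key: config_local.py
              path: config_local.py       # relative path, no '..'
        - secret:
            name: pgadmin-extra-certs     # assumed Secret name
            optional: true
```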

- PostgresCluster.spec.userInterface.pgAdmin.config.ldapBindPassword - ↩ Parent -

- - - -A Secret containing the value for the LDAP_BIND_PASSWORD setting. More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringThe key of the secret to select from. Must be a valid secret key.true
namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#namesfalse
optionalbooleanSpecify whether the Secret or its key must be definedfalse
- - -

- PostgresCluster.spec.userInterface.pgAdmin.metadata - ↩ Parent -

- - - -Metadata contains metadata for custom resources - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
annotationsmap[string]stringfalse
labelsmap[string]stringfalse
- - -

- PostgresCluster.spec.userInterface.pgAdmin.resources - ↩ Parent -

- - - -Compute resources of a pgAdmin container. Changing this value causes pgAdmin to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
limitsmap[string]int or stringLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
requestsmap[string]int or stringRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/false
- - -
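
For reference, a minimal sketch of the resources block; the quantities are placeholders, not sizing recommendations.

```yaml
# Hypothetical excerpt: requests and limits for the pgAdmin container.
# Changing these causes pgAdmin to restart, per the field description.
spec:
  userInterface:
    pgAdmin:
      resources:
        requests:
          cpu: 100m
          memory: 256Mi
        limits:
          memory: 256Mi
```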

- PostgresCluster.spec.userInterface.pgAdmin.service - ↩ Parent -

- - - -Specification of the service that exposes pgAdmin. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
metadataobjectMetadata contains metadata for custom resourcesfalse
nodePortintegerThe port on which this service is exposed when type is NodePort or LoadBalancer. Value must be in-range and not in use or the operation will fail. If unspecified, a port will be allocated if this Service requires one. - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeportfalse
typeenumMore info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-typesfalse
- - -

- PostgresCluster.spec.userInterface.pgAdmin.service.metadata - ↩ Parent -

- - - -Metadata contains metadata for custom resources - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
annotationsmap[string]stringfalse
labelsmap[string]stringfalse
- - -
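
Combining the service fields above, a hedged sketch exposing pgAdmin via a NodePort; the port number and label are illustrative assumptions.

```yaml
# Hypothetical excerpt: nodePort only applies when type is NodePort or
# LoadBalancer, and the value must fall in the cluster's NodePort range.
spec:
  userInterface:
    pgAdmin:
      service:
        type: NodePort
        nodePort: 32001                # assumed in-range port
        metadata:
          labels:
            example.com/tier: admin    # assumed label
```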

- PostgresCluster.spec.userInterface.pgAdmin.tolerations[index] - ↩ Parent -

- - - -The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
effectstringEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.false
keystringKey is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.false
operatorstringOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.false
tolerationSecondsintegerTolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.false
valuestringValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.false
- - -
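
As a worked instance of the toleration fields, a minimal sketch; the taint key and value are assumptions about how nodes might be tainted.

```yaml
# Hypothetical excerpt: tolerate an assumed dedicated-node taint for up
# to one hour before eviction (effect NoExecute).
spec:
  userInterface:
    pgAdmin:
      tolerations:
      - key: example.com/dedicated     # assumed taint key
        operator: Equal
        value: pgadmin
        effect: NoExecute
        tolerationSeconds: 3600
```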

- PostgresCluster.spec.userInterface.pgAdmin.topologySpreadConstraints[index] - ↩ Parent -

- - - -TopologySpreadConstraint specifies how to spread matching pods among the given topology. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
maxSkewintegerMaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.true
topologyKeystringTopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.true
whenUnsatisfiablestringWhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.true
labelSelectorobjectLabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.false
minDomainsintegerMinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.topologySpreadConstraints[index].labelSelector - ↩ Parent -

- - - -LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]objectmatchExpressions is a list of label selector requirements. The requirements are ANDed.false
matchLabelsmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.false
- - -

- PostgresCluster.spec.userInterface.pgAdmin.topologySpreadConstraints[index].labelSelector.matchExpressions[index] - ↩ Parent -

- - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystringkey is the label key that the selector applies to.true
operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.true
values[]stringvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.false
- - -
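
A hedged sketch tying the topology spread fields together; the role label used to count matching pods is an assumption.

```yaml
# Hypothetical excerpt: keep pgAdmin pods spread across zones with at
# most one pod of skew, falling back to best effort when unsatisfiable.
spec:
  userInterface:
    pgAdmin:
      topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: ScheduleAnyway
        labelSelector:
          matchLabels:
            postgres-operator.crunchydata.com/role: pgadmin  # assumed label
```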

- PostgresCluster.spec.users[index] - ↩ Parent -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
namestringThe name of this PostgreSQL user. The value may contain only lowercase letters, numbers, and hyphen so that it fits into Kubernetes metadata.true
databases[]stringDatabases to which this user can connect and create objects. Removing a database from this list does NOT revoke access. This field is ignored for the "postgres" user.false
optionsstringALTER ROLE options except for PASSWORD. This field is ignored for the "postgres" user. More info: https://www.postgresql.org/docs/current/role-attributes.htmlfalse
passwordobjectProperties of the password generated for this user.false
- - -

- PostgresCluster.spec.users[index].password - ↩ Parent -

- - - -Properties of the password generated for this user. - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
typeenumType of password to generate. Defaults to ASCII. Valid options are ASCII and AlphaNumeric. "ASCII" passwords contain letters, numbers, and symbols from the US-ASCII character set. "AlphaNumeric" passwords contain letters and numbers from the US-ASCII character set.true
- - -
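
To illustrate the two tables above, a minimal sketch of a user declaration; the user and database names and the role options are invented for this example.

```yaml
# Hypothetical excerpt: a user with access to one database, extra role
# attributes, and an AlphaNumeric generated password.
spec:
  users:
  - name: rhino                 # lowercase letters, numbers, hyphens only
    databases: [zoo]
    options: "CREATEDB CREATEROLE"
    password:
      type: AlphaNumeric
```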

- PostgresCluster.status - ↩ Parent -

- - - -PostgresClusterStatus defines the observed state of PostgresCluster - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
conditions[]objectconditions represent the observations of postgrescluster's current state. Known .status.conditions.type are: "PersistentVolumeResizing", "Progressing", "ProxyAvailable"false
databaseInitSQLstringDatabaseInitSQL state of custom database initialization in the clusterfalse
databaseRevisionstringIdentifies the databases that have been installed into PostgreSQL.false
instances[]objectCurrent state of PostgreSQL instances.false
monitoringobjectCurrent state of PostgreSQL cluster monitoring tool configurationfalse
observedGenerationintegerobservedGeneration represents the .metadata.generation on which the status was based.false
patroniobjectfalse
pgbackrestobjectStatus information for pgBackRestfalse
postgresVersionintegerStores the current PostgreSQL major version following a successful major PostgreSQL upgrade.false
proxyobjectCurrent state of the PostgreSQL proxy.false
startupInstancestringThe instance that should be started first when bootstrapping and/or starting a PostgresCluster.false
startupInstanceSetstringThe instance set associated with the startupInstancefalse
userInterfaceobjectCurrent state of the PostgreSQL user interface.false
usersRevisionstringIdentifies the users that have been installed into PostgreSQL.false
- - -

- PostgresCluster.status.conditions[index] - ↩ Parent -

- - - -Condition contains details for one aspect of the current state of this API Resource. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
lastTransitionTimestringlastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.true
messagestringmessage is a human readable message indicating details about the transition. This may be an empty string.true
reasonstringreason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.true
statusenumstatus of the condition, one of True, False, Unknown.true
typestringtype of condition in CamelCase.true
observedGenerationintegerobservedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.false
- - -
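
As an illustration only, a sketch of how one entry under status.conditions might render; every value below is invented, and the status subresource is written by the operator, not by users.

```yaml
# Illustrative, operator-written status; do not set these fields yourself.
status:
  conditions:
  - type: ProxyAvailable
    status: "True"
    reason: ProxyReady                        # assumed CamelCase reason
    message: pgBouncer is available
    lastTransitionTime: "2021-10-01T12:00:00Z"
    observedGeneration: 3
```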

- PostgresCluster.status.instances[index] - ↩ Parent -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
namestringtrue
readyReplicasintegerTotal number of ready pods.false
replicasintegerTotal number of pods.false
updatedReplicasintegerTotal number of pods that have the desired specification.false
- - -

- PostgresCluster.status.monitoring - ↩ Parent -

- - - -Current state of PostgreSQL cluster monitoring tool configuration - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
exporterConfigurationstringfalse
- - -

- PostgresCluster.status.patroni - ↩ Parent -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
switchoverstringTracks the execution of the switchover requests.false
switchoverTimelineintegerTracks the current timeline during switchoversfalse
systemIdentifierstringThe PostgreSQL system identifier reported by Patroni.false
- - -

- PostgresCluster.status.pgbackrest - ↩ Parent -

- - - -Status information for pgBackRest - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
manualBackupobjectStatus information for manual backupsfalse
repoHostobjectStatus information for the pgBackRest dedicated repository hostfalse
repos[]objectStatus information for pgBackRest repositoriesfalse
restoreobjectStatus information for in-place restoresfalse
scheduledBackups[]objectStatus information for scheduled backupsfalse
- - -

- PostgresCluster.status.pgbackrest.manualBackup - ↩ Parent -

- - - -Status information for manual backups - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
finishedbooleanSpecifies whether or not the Job is finished executing (does not indicate success or failure).true
idstringA unique identifier for the manual backup as provided using the "pgbackrest-backup" annotation when initiating a backup.true
activeintegerThe number of actively running manual backup Pods.false
completionTimestringRepresents the time the manual backup Job was determined by the Job controller to be completed. This field is only set if the backup completed successfully. Additionally, it is represented in RFC3339 form and is in UTC.false
failedintegerThe number of Pods for the manual backup Job that reached the "Failed" phase.false
startTimestringRepresents the time the manual backup Job was acknowledged by the Job controller. It is represented in RFC3339 form and is in UTC.false
succeededintegerThe number of Pods for the manual backup Job that reached the "Succeeded" phase.false
- - -

- PostgresCluster.status.pgbackrest.repoHost - ↩ Parent -

- - - -Status information for the pgBackRest dedicated repository host - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
apiVersionstringAPIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resourcesfalse
kindstringKind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kindsfalse
readybooleanWhether or not the pgBackRest repository host is ready for usefalse
- - -

- PostgresCluster.status.pgbackrest.repos[index] - ↩ Parent -

- - - -RepoStatus the status of a pgBackRest repository - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
namestringThe name of the pgBackRest repositorytrue
boundbooleanWhether or not the pgBackRest repository PersistentVolumeClaim is bound to a volumefalse
replicaCreateBackupCompletebooleanReplicaCreateBackupComplete indicates whether a backup exists in the repository as needed to bootstrap replicas.false
repoOptionsHashstringA hash of the required fields in the spec for defining an Azure, GCS or S3 repository, utilized to detect changes to these fields and then execute pgBackRest stanza-create commands accordingly.false
stanzaCreatedbooleanSpecifies whether or not a stanza has been successfully created for the repositoryfalse
volumestringThe name of the volume containing the pgBackRest repositoryfalse
- - -

- PostgresCluster.status.pgbackrest.restore - ↩ Parent -

- - - -Status information for in-place restores - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
finishedbooleanSpecifies whether or not the Job is finished executing (does not indicate success or failure).true
idstringA unique identifier for the restore as provided using the "pgbackrest-restore" annotation when initiating a restore.true
activeintegerThe number of actively running restore Pods.false
completionTimestringRepresents the time the restore Job was determined by the Job controller to be completed. This field is only set if the restore completed successfully. Additionally, it is represented in RFC3339 form and is in UTC.false
failedintegerThe number of Pods for the restore Job that reached the "Failed" phase.false
startTimestringRepresents the time the restore Job was acknowledged by the Job controller. It is represented in RFC3339 form and is in UTC.false
succeededintegerThe number of Pods for the restore Job that reached the "Succeeded" phase.false
- - -

- PostgresCluster.status.pgbackrest.scheduledBackups[index] - ↩ Parent -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
activeintegerThe number of actively running scheduled backup Pods.false
completionTimestringRepresents the time the scheduled backup Job was determined by the Job controller to be completed. This field is only set if the backup completed successfully. Additionally, it is represented in RFC3339 form and is in UTC.false
cronJobNamestringThe name of the associated pgBackRest scheduled backup CronJobfalse
failedintegerThe number of Pods for the scheduled backup Job that reached the "Failed" phase.false
repostringThe name of the associated pgBackRest repositoryfalse
startTimestringRepresents the time the scheduled backup Job was acknowledged by the Job controller. It is represented in RFC3339 form and is in UTC.false
succeededintegerThe number of Pods for the scheduled backup Job that reached the "Succeeded" phase.false
typestringThe pgBackRest backup type for this Jobfalse
- - -

- PostgresCluster.status.proxy - ↩ Parent -

- - - -Current state of the PostgreSQL proxy. - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
pgBouncerobjectfalse
- - -

- PostgresCluster.status.proxy.pgBouncer - ↩ Parent -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
postgresRevisionstringIdentifies the revision of PgBouncer assets that have been installed into PostgreSQL.false
readyReplicasintegerTotal number of ready pods.false
replicasintegerTotal number of non-terminated pods.false
- - -

- PostgresCluster.status.userInterface - ↩ Parent -

- - - -Current state of the PostgreSQL user interface. - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
pgAdminobjectThe state of the pgAdmin user interface.false
- - -

- PostgresCluster.status.userInterface.pgAdmin - ↩ Parent -

- - - -The state of the pgAdmin user interface. - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
usersRevisionstringHash that indicates which users have been installed into pgAdmin.false
diff --git a/docs/content/releases/5.0.0.md b/docs/content/releases/5.0.0.md deleted file mode 100644 index 19955c73fe..0000000000 --- a/docs/content/releases/5.0.0.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: "5.0.0" -date: -draft: false -weight: 900 ---- - -Crunchy Data announces the release of the PGO, the open source Postgres Operator, 5.0.0 on June 30, 2021. - -To get started with PGO 5.0.0, we invite you to read through the [quickstart]({{< relref "quickstart/_index.md" >}}). We also encourage you to work through the [PGO tutorial]({{< relref "tutorial/_index.md" >}}). - -PGO 5.0.0 is a major release of the Postgres Operator. The focus of this release was to take the features from the previous versions of PGO, add in some new features, and allow you to deploy Kubernetes native Postgres through a fully declarative, GitOps style workflow. As with previous versions, PGO 5.0 makes it easy to deploy production ready, cloud native Postgres. - -Postgres clusters are now fully managed through a custom resource called [`postgrescluster.postgres-operator.crunchydata.com`]({{< relref "references/crd.md" >}}). You can also view the various attributes of the custom resource using `kubectl explain postgrescluster.postgres-operator.crunchydata.com` or `kubectl explain postgrescluster`. The custom resource can be edited at any time, and all of the changes are rolled out in a minimally disruptive way. - -There are [a set of examples](https://github.com/CrunchyData/postgres-operator-examples/fork) for how to use Kustomize and Helm with PGO 5.0. This example set will grow and we encourage you to contribute to it. - -PGO 5.0 continues to support the Postgres architecture that was built up in previous releases. This means that Postgres clusters are deployed without a single-point-of-failure and can continue operating even if PGO is unavailable. PGO 5.0 includes support for Postgres high availability, backup management, disaster recovery, monitoring, full customizability, database cloning, connection pooling, security, running with locked down container settings, and more. - -PGO 5.0 also continuously monitors your environment to ensure all of the components you want deployed are available. For example, if PGO detects that your connection pooler is missing, it will recreate it as you specified in the custom resource. PGO 5.0 can watch for Postgres clusters in all Kubernetes namespaces or be isolated to individual namespaces. - -As PGO 5.0 is a major release, it is not backwards compatible with PGO 4.x. However, you can run PGO 4.x and PGO 5.0 in the same Kubernetes cluster, which allows you to migrate Postgres clusters from 4.x to 5.0. - -## Changes - -Beyond being fully declarative, PGO 5.0 has some notable changes that you should be aware of. These include: - -- The minimum Kubernetes version is now 1.18. The minimum OpenShift version is 4.5. This release drops support for OpenShift 3.11. - - We recommend running the latest bug fix releases of Kubernetes. -- The removal of the `pgo` client. This may be reintroduced in a later release, but all actions on a Postgres cluster can be accomplished using `kubectl`, `oc`, or your preferred Kubernetes management tool (e.g. ArgoCD). -- A fully defined `status` subresource is now available within the `postgrescluster` custom resource that provides direct insight into the current status of a PostgreSQL cluster. -- Native Kubernetes eventing is now utilized to generate and record events related to the creation and management of PostgreSQL clusters. 
-- Postgres instances now use Kubernetes StatefulSets. -- Scheduled backups now use Kubernetes CronJobs. -- Connections to Postgres require TLS. You can bring your own TLS infrastructure, otherwise PGO provides it for you. -- Custom configurations for all components can be set directly on the `postgrescluster` custom resource. - -## Features - -In addition to supporting the PGO 4.x feature set, PGO 5.0.0 adds the following new features: - -- Postgres minor version (bug fix) updates can be applied without having to update PGO. You only need to update the `image` attribute in the custom resource. -- Adds support for Azure Blob Storage for storing backups. This is in addition to using Kubernetes storage, Amazon S3 (or S3-equivalents like MinIO), and Google Cloud Storage (GCS). -- Allows for backups to be stored in up to four different locations simultaneously. -- Backup locations can be changed during the lifetime of a Postgres cluster, e.g. moving from "posix" to "s3". diff --git a/docs/content/releases/5.0.1.md b/docs/content/releases/5.0.1.md deleted file mode 100644 index a8d11bbd5b..0000000000 --- a/docs/content/releases/5.0.1.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "5.0.1" -date: -draft: false -weight: 899 ---- - -Crunchy Data announces the release of [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) 5.0.1. - -Crunchy Postgres for Kubernetes is powered by [PGO](https://github.com/CrunchyData/postgres-operator), the open source [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com). [PGO](https://github.com/CrunchyData/postgres-operator) is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/). - -Crunchy Postgres for Kubernetes 5.0.1 includes the following software versions upgrades: - -- [Patroni](https://patroni.readthedocs.io/) is now at 2.1.0. -- PL/Tcl is now included in the PostGIS (`crunchy-postgres-gis-ha`) container. - -Read more about how you can [get started]({{< relref "quickstart/_index.md" >}}) with Crunchy Postgres for Kubernetes. We recommend [forking the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo. - -## Features - -- Custom affinity rules and tolerations can now be added to pgBackRest restore Jobs. -- OLM bundles can now be generated for PGO 5. - -## Changes - -- The `replicas` value for an instance set must now be greater than `0`, and at least one instance set must now be defined for a `PostgresCluster`. This is to prevent the cluster from being scaled down to `0` instances, since doing so results in the inability to scale the cluster back up. -- Refreshed the PostgresCluster CRD documentation using the latest version of `crdoc` (`v0.3.0`). -- The PGO test suite now includes a test to validate image pull secrets. -- Related Image functionality has been implemented for the OLM installer as required to support offline deployments. -- The name of the PGO Deployment and ServiceAccount has been changed to `pgo` for all installers, allowing both PGO v4.x and PGO v5.x to be run in the same namespace. If you are using Kustomize to install PGO and are upgrading from PGO 5.0.0, please see the [Upgrade Guide]({{< relref "../upgrade/_index.md" >}}) for additional steps that must be completed as a result of this change in order to ensure a successful upgrade.
-- PGO now automatically detects whether or not it is running in an OpenShift environment. -- Postgres users and databases can be specified in `PostgresCluster.spec.users`. The credentials stored in the `{cluster}-pguser` Secret are still valid, but they are no longer reconciled. References to that Secret should be replaced with `{cluster}-pguser-{cluster}`. Once all references are updated, the old `{cluster}-pguser` Secret can be deleted. -- The built-in `postgres` superuser can now be managed the same way as other users. Specifying it in `PostgresCluster.spec.users` will give it a password, allowing it to connect over the network. -- PostgreSQL data and pgBackRest repo volumes are now reconciled using labels. - -## Fixes - -- It is now possible to customize `shared_preload_libraries` when monitoring is enabled. -- Fixed a typo in the description of the `openshift` field in the PostgresCluster CRD. -- When a new cluster is created using an existing PostgresCluster as its dataSource, the original primary for that cluster will now properly initialize as a replica following a switchover. This is fixed with the upgrade to Patroni 2.1.0. -- A consistent `startupInstance` name is now set in the PostgresCluster status when bootstrapping a new cluster using an existing PostgresCluster as its data source. -- It is now possible to properly customize the `pg_hba.conf` configuration file. diff --git a/docs/content/releases/5.0.2.md b/docs/content/releases/5.0.2.md deleted file mode 100644 index 372c1c60fb..0000000000 --- a/docs/content/releases/5.0.2.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: "5.0.2" -date: -draft: false -weight: 898 ---- - -Crunchy Data announces the release of [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) 5.0.2. - -Crunchy Postgres for Kubernetes is powered by [PGO](https://github.com/CrunchyData/postgres-operator), the open source [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com). [PGO](https://github.com/CrunchyData/postgres-operator) is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/). - -Crunchy Postgres for Kubernetes 5.0.2 includes the following software versions upgrades: - -- [PostgreSQL](https://www.postgresql.org) is updated to 13.4, 12.8, 11.13, and 10.18. -- PL/Tcl is now included in the PostGIS (`crunchy-postgres-gis-ha`) container. -- The [TimescaleDB](https://github.com/timescale/timescaledb) extension is now at version 2.4.0. -- The [set_user](https://github.com/pgaudit/set_user) extension is now at version 2.0.1. - -Read more about how you can [get started]({{< relref "quickstart/_index.md" >}}) with Crunchy Postgres for Kubernetes. We recommend [forking the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo. diff --git a/docs/content/releases/5.0.3.md b/docs/content/releases/5.0.3.md deleted file mode 100644 index c1349ab88d..0000000000 --- a/docs/content/releases/5.0.3.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: "5.0.3" -date: -draft: false -weight: 897 ---- - - -Crunchy Data announces the release of [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) 5.0.3.
- -Crunchy Postgres for Kubernetes is powered by [PGO](https://github.com/CrunchyData/postgres-operator), the open source [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com). [PGO](https://github.com/CrunchyData/postgres-operator) is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/container-suite). - -Crunchy Postgres for Kubernetes 5.0.3 includes the following software versions upgrades: - -- [PostgreSQL](https://www.postgresql.org) 14 is now available. -- [pgBackRest](https://pgbackrest.org/) is updated to version 2.35. -- [Patroni](https://patroni.readthedocs.io/) is updated to version 2.1.1. -- The [pgAudit](https://github.com/pgaudit/pgaudit) extension is now at version 1.6.0. -- The [pgAudit Analyze](https://github.com/pgaudit/pgaudit_analyze) extension is now at version 1.0.8. -- The [pgnodemx](https://github.com/CrunchyData/pgnodemx) extension is now at version 1.0.5. -- The [set_user](https://github.com/pgaudit/set_user) extension is now at version 3.0.0. -- The [wal2json](https://github.com/eulerto/wal2json) extension is now at version 2.4. - -Read more about how you can [get started]({{< relref "quickstart/_index.md" >}}) with Crunchy Postgres for Kubernetes. We recommend [forking the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo. - -## Features - -- The Postgres containers are renamed. `crunchy-postgres-ha` is now `crunchy-postgres`, and `crunchy-postgres-gis-ha` is now `crunchy-postgres-gis`. -- Some network filesystems are sensitive to Linux user and group permissions. Process GIDs can now be configured through `PostgresCluster.spec.supplementalGroups` for when your PVs don't advertise their [GID requirements](https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#access-control). -- A replica service is now automatically reconciled for access to Postgres replicas within a cluster. -- The Postgres primary service and PgBouncer service can now each be configured to have either a `ClusterIP`, `NodePort` or `LoadBalancer` service type. Suggested by Bryan A. S. (@bryanasdev000). -- [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) can now be specified for Postgres instances, the pgBackRest dedicated repository host as well as PgBouncer. Suggested by Annette Clewett. -- Default topology spread constraints are included to ensure PGO always attempts to deploy a high availability cluster architecture. -- PGO can now execute a custom SQL script when initializing a Postgres cluster. -- Custom resource requests and limits are now configurable for all `init` containers, therefore ensuring the desired [Quality of Service (QoS)](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/) class can be assigned to the various Pods comprising a cluster. -- Custom resource requests and limits are now configurable for all Jobs created for a `PostgresCluster`. -- A [Pod Priority Class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) is configurable for the Pods created for a `PostgresCluster`. -- An `imagePullPolicy` can now be configured for Pods created for a `PostgresCluster`. -- Existing `PGDATA`, Write-Ahead Log (WAL) and pgBackRest repository volumes can now be migrated from PGO v4 to PGO v5 by specifying a `volumes` data source when creating a `PostgresCluster`. 
-- There is now a [migration guide available for moving Postgres clusters from PGO v4 to PGO v5]({{< relref "upgrade/v4tov5/_index.md" >}}).
-- The pgAudit extension is now enabled by default in all clusters.
-- There is now additional validation for PVC definitions within the `PostgresCluster` spec to ensure successful PVC reconciliation.
-- Postgres server certificates are now automatically reloaded when they change.
-
-## Changes
-
-- The supplemental group `65534` is no longer applied by default. Upgrading the operator will perform a rolling update on all `PostgresCluster` custom resources to remove it.
-
-  If you need this GID for your network filesystem, you should perform the following steps when upgrading:
-
-  1. Before deploying the new operator, deploy the new CRD. You can get the new CRD from the [Postgres Operator Examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository and apply it by executing the following command:
-     ```console
-     $ kubectl apply -k kustomize/install
-     ```
-
-  2. Add the group to your existing `PostgresCluster` custom resource:
-     ```console
-     $ kubectl edit postgrescluster/hippo
-
-     kind: PostgresCluster
-     …
-     spec:
-       supplementalGroups:
-       - 65534
-     …
-     ```
-
-     _or_
-
-     ```console
-     $ kubectl patch postgrescluster/hippo --type=merge --patch='{"spec":{"supplementalGroups":[65534]}}'
-     ```
-
-     _or_
-
-     by modifying `spec.supplementalGroups` in your manifest.
-
-  3. Deploy the new operator. If you are using an up-to-date version of the manifest, you can run:
-     ```console
-     $ kubectl apply -k kustomize/install
-     ```
-
-- A dedicated pgBackRest repository host is now only deployed if a `volume` repository is configured. This means that deployments that use only cloud-based (`s3`, `gcs`, `azure`) repos will no longer see a dedicated repository host, nor will `SSHD` run within that Postgres cluster. As a result of this change, the `spec.backups.pgbackrest.repoHost.dedicated` section is removed from the `PostgresCluster` spec, and all settings within it are consolidated under the `spec.backups.pgbackrest.repoHost` section. When upgrading, please update the `PostgresCluster` spec to ensure any settings from section `spec.backups.pgbackrest.repoHost.dedicated` are moved into section `spec.backups.pgbackrest.repoHost`.
-- PgBouncer now uses SCRAM when authenticating into Postgres.
-- Generated Postgres certificates include the FQDN and other local names of the primary Postgres service. To regenerate the certificate of an existing cluster, delete the `tls.key` field from its certificate secret. Suggested by @ackerr01.
-
-## Fixes
-
-- Validation for the PostgresCluster spec is updated to ensure at least one repo is always defined for section `spec.backups.pgbackrest.repos`.
-- A restore will now complete successfully if `max_connections` and/or `max_worker_processes` is configured to a value higher than the default when backing up the Postgres database. Reported by Tiberiu Patrascu (@tpatrascu).
-- The installation documentation now properly defines how to set the `PGO_TARGET_NAMESPACE` environment variable for a single namespace installation.
-- Ensure the full allocation of shared memory is available to Postgres containers. Reported by Yuyang Zhang (@helloqiu).
-- OpenShift auto-detection logic now looks for the presence of the `SecurityContextConstraints` API to avoid false positives when APIs with an `openshift.io` Group suffix are installed in non-OpenShift clusters. Reported by Jean-Daniel.
diff --git a/docs/content/releases/5.0.4.md b/docs/content/releases/5.0.4.md
deleted file mode 100644
index ea9e41bf74..0000000000
--- a/docs/content/releases/5.0.4.md
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: "5.0.4"
-date:
-draft: false
-weight: 896
----
-
-
-Crunchy Data announces the release of [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) 5.0.4.
-
-Crunchy Postgres for Kubernetes is powered by [PGO](https://github.com/CrunchyData/postgres-operator), the open source [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com). [PGO](https://github.com/CrunchyData/postgres-operator) is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/container-suite).
-
-Crunchy Postgres for Kubernetes 5.0.4 includes the following software version upgrades:
-
-- [PostgreSQL](https://www.postgresql.org) versions 14.1, 13.5, 12.9, 11.14, and 10.19 are now available.
-- [PostGIS](http://postgis.net/) version 3.1.4 is now available.
-- [pgBackRest](https://pgbackrest.org/) is now at version 2.36.
-- [PgBouncer](https://www.pgbouncer.org/) is now at version 1.16.
-- The [pgAudit](https://github.com/pgaudit/pgaudit) extension is now at version 1.6.1.
-- The [pgnodemx](https://github.com/CrunchyData/pgnodemx) extension is now at version 1.2.0.
-- The [pg_partman](https://github.com/pgpartman/pg_partman) extension is now at version 4.6.0.
-- The [TimescaleDB](https://github.com/timescale/timescaledb) extension is now at version 2.5.0.
-
-Read more about how you can [get started]({{< relref "quickstart/_index.md" >}}) with Crunchy Postgres for Kubernetes. We recommend [forking the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo.
-
-## Features
-
-- The JDBC connection string for the Postgres database and a PgBouncer instance is now available in the User Secret using `jdbc-uri` and `pgbouncer-jdbc-uri`, respectively.
-- Editing the `password` field of a User Secret now [changes a password]({{< relref "architecture/user-management.md" >}}#custom-passwords), instead of having to create a verifier.
-
-## Changes
-
-- [PostGIS](https://postgis.net/) is now automatically enabled when using the `crunchy-postgres-gis` container.
-- The [Downward API](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/) is mounted to the `database` containers.
-- [pgnodemx](https://github.com/CrunchyData/pgnodemx) can now be enabled and used without having to enable monitoring.
-- The description of the `name` field for an instance set now states that a name is only optional when a single instance set is defined.
-
-## Fixes
-
-- Fixed an issue when performing a restore with PostgreSQL 14: if there are mismatched PostgreSQL configuration parameters, PGO will resume replay and let PostgreSQL crash so PGO can ultimately fix it, rather than having the restore pause indefinitely.
-- The pgBackRest Pod no longer automatically mounts the default Service Account. Reported by @Shrivastava-Varsha.
-- The Jobs that move data between volumes now have the correct Security Context set.
-- The UBI 8 `crunchy-upgrade` container contains all recent PostgreSQL versions that can be upgraded.
-- Ensure controller references are used for all objects that need them, instead of owner references.
-- It is no longer necessary to have external WAL volumes enabled in order to upgrade a PGO v4 cluster to PGO v5 using the "Migrate From Backups" or "Migrate Using a Standby Cluster" upgrade methods.
diff --git a/docs/content/releases/5.0.5.md b/docs/content/releases/5.0.5.md
deleted file mode 100644
index 4504bf198a..0000000000
--- a/docs/content/releases/5.0.5.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: "5.0.5"
-date:
-draft: false
-weight: 895
----
-
-
-Crunchy Data announces the release of [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) 5.0.5.
-
-Crunchy Postgres for Kubernetes is powered by [PGO](https://github.com/CrunchyData/postgres-operator), the open source [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com). [PGO](https://github.com/CrunchyData/postgres-operator) is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/container-suite).
-
-Crunchy Postgres for Kubernetes 5.0.5 includes the following software version upgrades:
-
-- [PostgreSQL](https://www.postgresql.org) versions 14.2, 13.6, 12.10, 11.15, and 10.20 are now available.
-
-Read more about how you can [get started]({{< relref "quickstart/_index.md" >}}) with Crunchy Postgres for Kubernetes. We recommend [forking the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo.
-
-
-## Features
-
-- An S3, GCS, or Azure data source can now be configured when bootstrapping a new PostgresCluster. This allows existing cloud-based pgBackRest repositories to be utilized to bootstrap new clusters, while also ensuring those new clusters create and utilize their own pgBackRest repository for archives and backups (rather than writing to the repo utilized to bootstrap the cluster).
-- It is now possible to configure the number of workers for the PostgresCluster controller.
-
-## Fixes
-
-- Reduced the scope of automatic OpenShift environment detection. This looks specifically for the existence of the `SecurityContextConstraint` API.
-- An external IP is no longer copied to the primary service (e.g. `hippo-primary`) when the `LoadBalancer` service type has been configured for PostgreSQL.
-- pgBackRest no longer logs to the `/tmp` emptyDir by default. Instead, pgBackRest logs to either the `PGDATA` volume (if running inside of a PG instance Pod) or a pgBackRest repository volume (if running inside a dedicated repo host Pod).
-- All pgBackRest configuration resources are now copied from the source cluster when cloning a PG cluster.
-- Image pull secrets are now set on directory move jobs.
-- Resources are now properly set on the `nss-wrapper-init` container.
diff --git a/docs/content/releases/5.1.0.md b/docs/content/releases/5.1.0.md
deleted file mode 100644
index c7fc0940c9..0000000000
--- a/docs/content/releases/5.1.0.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-title: "5.1.0"
-date:
-draft: false
-weight: 850
----
-
-Crunchy Data announces the release of [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) 5.1.0.
-
-Crunchy Postgres for Kubernetes is powered by [PGO](https://github.com/CrunchyData/postgres-operator), the open source [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com). [PGO](https://github.com/CrunchyData/postgres-operator) is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/container-suite).
-
-Crunchy Postgres for Kubernetes 5.1.0 includes the following software version upgrades:
-
-- [Patroni](https://patroni.readthedocs.io/) is now at version 2.1.3.
-- [pgAdmin 4](https://www.pgadmin.org/) is now at version 4.30.
-- [pgBackRest](https://pgbackrest.org/) is updated to version 2.38.
-- The [pgAudit](https://github.com/pgaudit/pgaudit) extension is now at version 1.6.2 (PG 14), 1.5.2 (PG 13), 1.4.3 (PG 12), 1.3.4 (PG 11) & 1.2.4 (PG 10).
-- The [pgnodemx](https://github.com/CrunchyData/pgnodemx) extension is now at version 1.3.0.
-- The [TimescaleDB](https://github.com/timescale/timescaledb) extension is now at version 2.6.0.
-
-Read more about how you can [get started]({{< relref "quickstart/_index.md" >}}) with Crunchy Postgres for Kubernetes. We recommend [forking the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo.
-
-## Major Features
-
-### pgAdmin 4 Integration
-
-PGO v5.1 reintroduces the pgAdmin 4 integration from [PGO v4](https://access.crunchydata.com/documentation/postgres-operator/4.7.5/architecture/pgadmin4/). v5.1 adds the [`spec.userInterface.pgAdmin`]({{< relref "references/crd.md" >}}#postgresclusterspecuserinterfacepgadmin) section to the `PostgresCluster` custom resource to enable pgAdmin 4 integration for a Postgres cluster. Any users defined in `spec.users` are synced with pgAdmin 4, allowing for a seamless management experience.
-
-Please see the [pgAdmin 4 section](https://access.crunchydata.com/documentation/postgres-operator/v5/architecture/pgadmin4/) of the PGO documentation for more information about this integration.
-
-### Removal of SSH Requirement for Local Backups
-
-Previous versions of PGO relied on the use of `ssh` to take backups and store archive files on Kubernetes-managed storage. PGO v5.1 now uses mTLS to securely transfer and manage these files.
-
-The upgrade to pgBackRest TLS is seamless and transparent if you are using related image environment variables with your PGO Deployment (please see the [PostgresCluster CRD reference](https://access.crunchydata.com/documentation/postgres-operator/v5/references/crd/) for more information). This is because PGO will automatically handle updating all image tags across all existing PostgresClusters following the upgrade to v5.1, seamlessly rolling out any new images as required for proper pgBackRest TLS functionality.
-
-If you are not using related image environment variables, and are instead explicitly defining images via the `image` fields in your PostgresCluster spec, then an additional step is required in order to ensure a seamless upgrade. Specifically, all `postgrescluster.spec.image` and `postgrescluster.spec.backups.pgbackrest.image` fields must first be updated to specify images containing pgBackRest 2.38. Therefore, prior to upgrading, please update all `postgrescluster.spec.image` and `postgrescluster.spec.backups.pgbackrest.image` fields to the latest versions of the `crunchy-postgres` and `crunchy-pgbackrest` containers available per the [Components and Compatibility guide](https://access.crunchydata.com/documentation/postgres-operator/v5/references/components/) (please note that the `crunchy-postgres` container should be updated to the latest version available for the major version of PostgreSQL currently being utilized within a cluster).
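-
-For illustration, a minimal sketch of the fields in question (the image tags are placeholders, not real tags; take the actual values from the Components and Compatibility guide):
-
-```yaml
-spec:
-  image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:<latest-tag-for-your-PG-major>  # placeholder tag
-  backups:
-    pgbackrest:
-      image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:<tag-with-pgbackrest-2.38>  # placeholder tag
-```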
-
-In the event that PGO is upgraded to v5.1 _before_ updating your image tags, simply update any `image` fields in your PostgresCluster spec as soon as possible following the upgrade.
-
-## Features
-
-- Set [Pod Disruption Budgets]({{< relref "architecture/high-availability.md" >}}#pod-disruption-budgets) (PDBs) for both Postgres and PgBouncer instances.
-- Postgres configuration changes requiring a database restart are now automatically rolled out to all instances in the cluster.
-- Do not recreate instance Pods for changes that only require a Postgres restart. These types of changes are now applied more quickly.
-- Support for [manual switchovers or failovers]({{< relref "tutorial/administrative-tasks.md">}}#changing-the-primary).
-- Rotate PgBouncer TLS certificates without downtime.
-- Added support for securely authenticating with PostgreSQL against Active Directory using the GSSAPI.
-- Support for using [AWS IAM roles with S3]({{< relref "tutorial/backups.md" >}}#using-an-aws-integrated-identity-provider-and-role) with backups when PGO is deployed in EKS.
-- The characters used for password generation can now be controlled using the `postgrescluster.spec.users.password.type` parameter. Choices are `AlphaNumeric` and `ASCII`; defaults to `ASCII`.
-- Introduced automatic checking for updates for PGO and Postgres components. If an update is discovered, it is included in the PGO logs.
-
-## Changes
-
-- As a result of [a fix in PgBouncer v1.16](https://github.com/libusual/libusual/commit/ab960074cb7a), PGO no longer sets verbosity settings in the PgBouncer configuration to catch missing `%include` directives. Users can increase verbosity in their own configuration files to maintain the previous behavior.
-- The Postgres `archive_timeout` setting now defaults to 60 seconds (`60s`), which matches the behavior from PGO v4. If you do not require WAL files to be generated once a minute (e.g. a generally idle system where a window of data loss is acceptable, or a development system), you can set this to `0`:
-
-```yaml
-spec:
-  patroni:
-    dynamicConfiguration:
-      postgresql:
-        parameters:
-          archive_timeout: 0
-```
-- All Pods now have `enableServiceLinks` set to `false` in order to ensure injected environment variables do not conflict with the various applications running within.
-
-## Fixes
-
-- The names of CronJobs created for scheduled backups are shortened to `--` to allow for longer PostgresCluster names.
diff --git a/docs/content/releases/5.1.1.md b/docs/content/releases/5.1.1.md
deleted file mode 100644
index 0734b1083e..0000000000
--- a/docs/content/releases/5.1.1.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: "5.1.1"
-date:
-draft: false
-weight: 849
----
-
-Crunchy Data announces the release of [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) 5.1.1.
-
-Crunchy Postgres for Kubernetes is powered by [PGO](https://github.com/CrunchyData/postgres-operator), the open source [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com). [PGO](https://github.com/CrunchyData/postgres-operator) is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/container-suite).
-
-Crunchy Postgres for Kubernetes 5.1.1 includes the following software version upgrades:
-
-- [PostgreSQL](https://www.postgresql.org) versions 14.3, 13.7, 12.11, 11.16, and 10.21 are now available.
-- [PostGIS](http://postgis.net/) version 3.2.1 is now available.
-- The [pg_partman](https://github.com/pgpartman/pg_partman) extension is now at version 4.6.1.
-- The [TimescaleDB](https://github.com/timescale/timescaledb) extension is now at version 2.6.1.
-
-Read more about how you can [get started]({{< relref "quickstart/_index.md" >}}) with Crunchy Postgres for Kubernetes. We recommend [forking the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo.
-
-## Fixes
-
-- It is now possible to perform major PostgreSQL version upgrades when using an external WAL directory.
-- The documentation for pgAdmin 4 now clearly states that any pgAdmin user created by PGO will have a `@pgo` suffix.
diff --git a/docs/content/releases/5.1.2.md b/docs/content/releases/5.1.2.md
deleted file mode 100644
index ab8f2d69a6..0000000000
--- a/docs/content/releases/5.1.2.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: "5.1.2"
-date:
-draft: false
-weight: 848
----
-
-Crunchy Data announces the release of [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) 5.1.2.
-
-Crunchy Postgres for Kubernetes is powered by [PGO](https://github.com/CrunchyData/postgres-operator), the open source [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com). [PGO](https://github.com/CrunchyData/postgres-operator) is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/container-suite).
-
-Crunchy Postgres for Kubernetes 5.1.2 includes the following software version upgrades:
-
-- [PostgreSQL](https://www.postgresql.org) version 14.4 is now available.
-
-Read more about how you can [get started]({{< relref "quickstart/_index.md" >}}) with Crunchy Postgres for Kubernetes. We recommend [forking the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo.
diff --git a/docs/content/releases/5.2.0.md b/docs/content/releases/5.2.0.md
deleted file mode 100644
index a3bf374182..0000000000
--- a/docs/content/releases/5.2.0.md
+++ /dev/null
@@ -1,41 +0,0 @@
----
-title: "5.2.0"
-date:
-draft: false
-weight: 847
----
-
-Crunchy Data announces the release of [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) 5.2.0.
-
-Crunchy Postgres for Kubernetes is powered by [PGO](https://github.com/CrunchyData/postgres-operator), the open source [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com). [PGO](https://github.com/CrunchyData/postgres-operator) is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers).
-
-Read more about how you can [get started]({{< relref "quickstart/_index.md" >}}) with Crunchy Postgres for Kubernetes. We recommend [forking the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo.
-
-## Major Features
-
-This and all PGO v5 releases are compatible with a brand new `pgo` command line interface.
-Please see the [`pgo` CLI documentation](https://access.crunchydata.com/documentation/postgres-operator-client/latest)
-for its release notes and more details.
-
-## Features
-
-- Added the ability to customize and influence the scheduling of pgBackRest backup Jobs using `affinity` and `tolerations`.
-- You can now pause the reconciliation and rollout of changes to a PostgreSQL cluster using the `spec.paused` field.
-- Leaf certificates provisioned by PGO as part of a PostgreSQL cluster's TLS infrastructure are now automatically rotated prior to expiration.
-- PGO now has support for feature gates.
-- You can now add custom sidecars to both PostgreSQL instance Pods and PgBouncer Pods using the `spec.instances.containers` and `spec.proxy.pgBouncer.containers` fields.
-- It is now possible to configure standby clusters to replicate from a remote primary using streaming replication.
-- Added the ability to provide a custom `nodePort` for the primary PostgreSQL, pgBouncer, and pgAdmin services.
-- Added the ability to define custom labels and annotations for the primary PostgreSQL, pgBouncer, and pgAdmin services.
-
-## Changes
-
-- All containers are now run with the minimum capabilities required by the container runtime.
-- The PGO documentation now includes instructions for rotating the root TLS certificate.
-- A `fsGroupChangePolicy` of `OnRootMismatch` is now set on all Pods.
-- The `runAsNonRoot` security setting is on every container rather than every pod.
-
-## Fixes
-
-- A better timeout has been set for the `pg_ctl` `start` and `stop` commands that are run during a restore.
-- A restore can now be re-attempted if PGO is unable to cleanly start or stop the database during a previous restore attempt.
diff --git a/docs/content/releases/5.3.0.md b/docs/content/releases/5.3.0.md
deleted file mode 100644
index d3bfafed10..0000000000
--- a/docs/content/releases/5.3.0.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-title: "5.3.0"
-date:
-draft: false
-weight: 846
----
-
-Crunchy Data announces the release of [Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) 5.3.0.
-
-Crunchy Postgres for Kubernetes is powered by [PGO](https://github.com/CrunchyData/postgres-operator), the open source [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com). [PGO](https://github.com/CrunchyData/postgres-operator) is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers).
-
-Crunchy Postgres for Kubernetes 5.3.0 includes the following software version upgrades:
-
-- [PostgreSQL](https://www.postgresql.org) version 15.1 is now available.
-- [pgMonitor](https://github.com/CrunchyData/pgmonitor) is now at version 4.8.0.
-- The [`controller-runtime`](https://github.com/kubernetes-sigs/controller-runtime) libraries have been updated to 0.12.3.
-- [Go](https://go.dev/) 1.19 is now utilized to build Crunchy Postgres for Kubernetes.
-
-Additionally, the [pgo CLI](https://access.crunchydata.com/documentation/postgres-operator-client/latest) version 0.2.0 is now available.
-
-Read more about how you can [get started](https://access.crunchydata.com/documentation/postgres-operator/latest/quickstart/) with Crunchy Postgres for Kubernetes. We recommend [forking the Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repo.
-
-_**Note:** TimescaleDB and pgAdmin 4 are not currently supported for use with PostgreSQL 15_.
-
-## Features
-
-- PostgreSQL 15 support.
-- Enable TLS for the PostgreSQL exporter using the new `spec.monitoring.pgmonitor.exporter.customTLSSecret` field.
-- Configure pgBackRest for IPv6 environments using the `postgres-operator.crunchydata.com/pgbackrest-ip-version` annotation.
-- Configure the [TTL](https://kubernetes.io/docs/concepts/workloads/controllers/job/#ttl-mechanism-for-finished-jobs) for pgBackRest backup Jobs.
-- Use Helm's [OCI registry capability](https://helm.sh/docs/topics/registries/) to install Crunchy Postgres for Kubernetes.
-
-## Changes
-
-- JIT is now explicitly disabled for the monitoring user, allowing users to opt into using JIT elsewhere in the database without impacting exporter functionality. Contributed by Kirill Petrov (@chobostar).
-- PGO now logs both `stdout` and `stderr` when running a SQL file referenced via `spec.databaseInitSQL` during database initialization. Contributed by Jeff Martin (@jmartin127).
-- The `pgnodemx` and `pg_stat_statements` extensions are now automatically upgraded.
-- The `postgres-startup` init container now logs an error message if the version of PostgreSQL installed in the image does not match the PostgreSQL version specified using `spec.postgresVersion`.
-- Limit the monitoring user to local connections using SCRAM authentication. Contributed by Scott Zelenka (@szelenka).
-- Skip a scheduled backup when the prior one is still running. Contributed by Scott Zelenka (@szelenka).
-- The `dataSource.volumes` migration strategy has been improved to better handle `PGDATA` directories with invalid permissions and a missing `postgresql.conf` file.
-
-## Fixes
-
-- A `psycopg2` error is no longer displayed when connecting to a database using pgAdmin 4.
-- With the exception of the `--repo` option itself, PGO no longer prevents users from specifying pgBackRest options containing the string "repo" (e.g. `--repo1-retention-full`).
-- PGO now properly filters Jobs by namespace when reconciling restore or data migration Jobs, ensuring PostgresClusters with the same name can be created within different namespaces.
-- The Major PostgreSQL Upgrades API (`PGUpgrade`) now properly handles clusters that have various extensions enabled.
diff --git a/docs/content/releases/_index.md b/docs/content/releases/_index.md
deleted file mode 100644
index 7ea3840539..0000000000
--- a/docs/content/releases/_index.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: "Release Notes"
-date:
-draft: false
-weight: 103
----
diff --git a/docs/content/support/_index.md b/docs/content/support/_index.md
deleted file mode 100644
index 0999a7cca0..0000000000
--- a/docs/content/support/_index.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: "Support"
-date:
-draft: false
-weight: 110
----
-
-There are a few options available for community support of [PGO: the Postgres Operator](https://github.com/CrunchyData/postgres-operator):
-
-- **If you believe you have found a bug** or have a detailed feature request: please open [an issue on GitHub](https://github.com/CrunchyData/postgres-operator/issues/new/choose). The Postgres Operator community and the Crunchy Data team behind PGO are generally active in responding to issues.
-- **For general questions or community support**: please join the [PostgreSQL Operator community mailing list](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join) at [https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join).
-
-In all cases, please be sure to provide as many details as possible regarding your issue, including:
-
-- Your Platform (e.g. Kubernetes vX.YY.Z)
-- Operator Version (e.g. {{< param operatorVersion >}})
-- A detailed description of the issue, as well as steps you took that led up to the issue
-- Any relevant logs
-- Any additional information you can provide that you may find helpful
-
-For production and commercial support of the PostgreSQL Operator, please
-[contact Crunchy Data](https://www.crunchydata.com/contact/) at [info@crunchydata.com](mailto:info@crunchydata.com) for information regarding an [Enterprise Support Subscription](https://www.crunchydata.com/about/value-of-subscription/).
diff --git a/docs/content/tutorial/_index.md b/docs/content/tutorial/_index.md
deleted file mode 100644
index db7477da91..0000000000
--- a/docs/content/tutorial/_index.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: "Tutorial"
-date:
-draft: false
-weight: 20
----
-
-Ready to get started with [PGO](https://github.com/CrunchyData/postgres-operator), the [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com)? Us too!
-
-This tutorial covers several concepts around day-to-day life managing a Postgres cluster with PGO. While going through various "HOWTOs" with PGO, we will also cover concepts and features that will help you have a successful cloud native Postgres journey!
-
-In this tutorial, you will learn:
-
-- How to create a Postgres cluster
-- How to connect to a Postgres cluster
-- How to scale and create a high availability (HA) Postgres cluster
-- How to resize your cluster
-- How to set up proper disaster recovery and manage backups and restores
-- How to apply software updates to Postgres and other components
-- How to set up connection pooling
-- How to delete your cluster
-
-and more.
-
-You will also see:
-
-- How PGO helps your Postgres cluster achieve high availability
-- How PGO can heal your Postgres cluster and ensure all objects are present and available
-- How PGO sets up disaster recovery
-- How to manage working with PGO in a single namespace or in a cluster-wide installation of PGO
-
-[Let's get started]({{< relref "./getting-started.md" >}})!
diff --git a/docs/content/tutorial/administrative-tasks.md b/docs/content/tutorial/administrative-tasks.md
deleted file mode 100644
index 8946ef6db8..0000000000
--- a/docs/content/tutorial/administrative-tasks.md
+++ /dev/null
@@ -1,276 +0,0 @@
----
-title: "Administrative Tasks"
-date:
-draft: false
-weight: 105
----
-
-## Manually Restarting PostgreSQL
-
-There are times when you might need to manually restart PostgreSQL. This can be done by adding or updating a custom annotation to the cluster's `spec.metadata.annotations` section. PGO will notice the change and perform a [rolling restart]({{< relref "/architecture/high-availability.md" >}}#rolling-update).
-
-For example, if you have a cluster named `hippo` in the namespace `postgres-operator`, all you need to do is patch the hippo PostgresCluster with the following:
-
-```shell
-kubectl patch postgrescluster/hippo -n postgres-operator --type merge \
-  --patch '{"spec":{"metadata":{"annotations":{"restarted":"'"$(date)"'"}}}}'
-```
-
-Watch your hippo cluster: you will see the rolling update has been triggered and the restart has begun.
-
-## Shutdown
-
-You can shut down a Postgres cluster by setting the `spec.shutdown` attribute to `true`. You can do this by editing the manifest, or, in the case of the `hippo` cluster, by executing a command like the one below:
-
-```
-kubectl patch postgrescluster/hippo -n postgres-operator --type merge \
-  --patch '{"spec":{"shutdown": true}}'
-```
-
-The effect of this is that all the Kubernetes workloads for this cluster are
-scaled to 0. You can verify this with the following command:
-
-```
-kubectl get deploy,sts,cronjob --selector=postgres-operator.crunchydata.com/cluster=hippo
-
-NAME                              READY   UP-TO-DATE   AVAILABLE   AGE
-deployment.apps/hippo-pgbouncer   0/0     0            0           1h
-
-NAME                             READY   AGE
-statefulset.apps/hippo-00-lwgx   0/0     1h
-
-NAME                             SCHEDULE   SUSPEND   ACTIVE
-cronjob.batch/hippo-repo1-full   @daily     True      0
-```
-
-To turn a Postgres cluster that is shut down back on, you can set `spec.shutdown` to `false`.
-
-## Pausing Reconciliation and Rollout
-
-You can pause the Postgres cluster reconciliation process by setting the
-`spec.paused` attribute to `true`. You can do this by editing the manifest, or,
-in the case of the `hippo` cluster, by executing a command like the one below:
-
-```
-kubectl patch postgrescluster/hippo -n postgres-operator --type merge \
-  --patch '{"spec":{"paused": true}}'
-```
-
-Pausing a cluster will suspend any changes to the cluster's current state until
-reconciliation is resumed. This allows you to fully control when changes to
-the PostgresCluster spec are rolled out to the Postgres cluster. While paused,
-no statuses are updated other than the "Progressing" condition.
-
-To resume reconciliation of a Postgres cluster, you can either set `spec.paused`
-to `false` or remove the setting from your manifest.
-
-## Rotating TLS Certificates
-
-Credentials should be invalidated and replaced (rotated) as often as possible
-to minimize the risk of their misuse. Unlike passwords, every TLS certificate
-has an expiration, so replacing them is inevitable.
-
-In fact, PGO automatically rotates the client certificates that it manages *before*
-the expiration date on the certificate. A new client certificate will be generated
-after two-thirds of its working duration; so, for instance, a PGO-created certificate
-with an expiration date 12 months in the future will be replaced by PGO around the
-eight-month mark. This is done so that you do not have to worry about running into
-problems or interruptions of service with an expired certificate.
-
-### Triggering a Certificate Rotation
-
-If you want to rotate a single client certificate, you can regenerate the certificate
-of an existing cluster by deleting the `tls.key` field from its certificate Secret.
-
-Is it time to rotate your PGO root certificate? All you need to do is delete the `pgo-root-cacert` secret. PGO will regenerate it and roll it out seamlessly, ensuring your apps continue communicating with the Postgres cluster without having to update any configuration or deal with any downtime.
-
-```bash
-kubectl delete secret pgo-root-cacert
-```
-
-{{% notice note %}}
-PGO only updates secrets containing the generated root certificate. It does not touch custom certificates.
-{{% /notice %}}
-
-### Rotating Custom TLS Certificates
-
-When you use your own TLS certificates with PGO, you are responsible for replacing them appropriately.
-Here's how.
-
-PGO automatically detects and loads changes to the contents of PostgreSQL server
-and replication Secrets without downtime. You or your certificate manager need
-only replace the values in the Secret referenced by `spec.customTLSSecret`.
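-
-For orientation, the reference itself is just a Secret name in the spec (a minimal sketch; the Secret name `hippo.tls` is illustrative):
-
-```yaml
-spec:
-  customTLSSecret:
-    name: hippo.tls
-```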
-
-If instead you change `spec.customTLSSecret` to refer to a new Secret or new fields,
-PGO will perform a [rolling restart]({{< relref "/architecture/high-availability.md" >}}#rolling-update).
-
-{{% notice info %}}
-When changing the PostgreSQL certificate authority, make sure to update
-[`customReplicationTLSSecret`]({{< relref "/tutorial/customize-cluster.md" >}}#customize-tls) as well.
-{{% /notice %}}
-
-PGO automatically notifies PgBouncer when there are changes to the contents of
-PgBouncer certificate Secrets. Recent PgBouncer versions load those changes
-without downtime, but versions prior to 1.16.0 need to be restarted manually.
-There are a few ways to restart an older version of PgBouncer to reload Secrets:
-
-1. Store the new certificates in a new Secret. Edit the PostgresCluster object
-   to refer to the new Secret, and PGO will perform a rolling restart of PgBouncer.
-   ```yaml
-   spec:
-     proxy:
-       pgBouncer:
-         customTLSSecret:
-           name: hippo.pgbouncer.new.tls
-   ```
-
-   _or_
-
-2. Replace the old certificates in the current Secret. PGO doesn't notice when
-   the contents of your Secret change, so you need to trigger a rolling restart
-   of PgBouncer. Edit the PostgresCluster object to add a unique annotation.
-   The name and value are up to you, so long as the value differs from the
-   previous value.
-   ```yaml
-   spec:
-     proxy:
-       pgBouncer:
-         metadata:
-           annotations:
-             restarted: Q1-certs
-   ```
-
-   This `kubectl patch` command uses your local date and time:
-
-   ```shell
-   kubectl patch postgrescluster/hippo --type merge \
-     --patch '{"spec":{"proxy":{"pgBouncer":{"metadata":{"annotations":{"restarted":"'"$(date)"'"}}}}}}'
-   ```
-
-## Changing the Primary
-
-There may be times when you want to change the primary in your HA cluster. This can be done
-using the `patroni.switchover` section of the PostgresCluster spec. It allows
-you to enable switchovers in your PostgresClusters, target a specific instance as the new
-primary, and run a failover if your PostgresCluster has entered a bad state.
-
-Let's go through the process of performing a switchover!
-
-First, you need to update your spec to prepare your cluster to change the primary. Edit your spec
-to have the following fields:
-
-```yaml
-spec:
-  patroni:
-    switchover:
-      enabled: true
-```
-
-After you apply this change, PGO will be looking for the trigger to perform a switchover in your
-cluster. You will trigger the switchover by adding the `postgres-operator.crunchydata.com/trigger-switchover`
-annotation to your custom resource. The best way to set this annotation is
-with a timestamp, so you know when you initiated the change.
-
-For example, for our `hippo` cluster, we can run the following command to trigger the switchover:
-
-```shell
-kubectl annotate -n postgres-operator postgrescluster hippo \
-  postgres-operator.crunchydata.com/trigger-switchover="$(date)"
-```
-
-{{% notice tip %}}
-If you want to perform another switchover you can re-run the annotation command and add the `--overwrite` flag:

-```shell
-kubectl annotate -n postgres-operator postgrescluster hippo --overwrite \
-  postgres-operator.crunchydata.com/trigger-switchover="$(date)"
-```
-{{% /notice %}}
-
-PGO will detect this annotation and use the Patroni API to request a change to the current primary!
-
-The roles on your database instance Pods will start changing as Patroni works. The new primary
-will have the `master` role label, and the old primary will be updated to `replica`.
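-
-One way to follow those role changes as they happen (a sketch, assuming the `hippo` cluster in the `postgres-operator` namespace) is to watch the role label on the Pods:
-
-```shell
-kubectl get pods -n postgres-operator --watch \
-  --selector=postgres-operator.crunchydata.com/cluster=hippo \
-  -L postgres-operator.crunchydata.com/role
-```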
-
-The status of the switch will be tracked using the `status.patroni.switchover` field. This will be set
-to the value defined in your trigger annotation. If you use a timestamp as the annotation, this is
-another way to determine when the switchover was requested.
-
-After the instance Pod labels have been updated and `status.patroni.switchover` has been set, the
-primary has been changed on your cluster!
-
-{{% notice info %}}
-After changing the primary, we recommend that you disable switchovers by setting `spec.patroni.switchover.enabled`
-to false or removing the field from your spec entirely. If the field is removed, the corresponding
-status will also be removed from the PostgresCluster.
-{{% /notice %}}
-
-
-#### Targeting an instance
-
-Another option you have when switching the primary is providing a target instance as the new
-primary. This target instance will be used as the candidate when performing the switchover.
-The `spec.patroni.switchover.targetInstance` field takes the name of the instance that you are switching to.
-
-This name can be found in a couple of different places; one is as the name of the StatefulSet and
-another is on the database Pod as the `postgres-operator.crunchydata.com/instance` label. The
-following commands can help you determine which instance is the current primary and what name to use as the
-`targetInstance`:
-
-```shell-session
-$ kubectl get pods -l postgres-operator.crunchydata.com/cluster=hippo \
-    -L postgres-operator.crunchydata.com/instance \
-    -L postgres-operator.crunchydata.com/role
-
-NAME                     READY   STATUS    RESTARTS   AGE     INSTANCE               ROLE
-hippo-instance1-jdb5-0   3/3     Running   0          2m47s   hippo-instance1-jdb5   master
-hippo-instance1-wm5p-0   3/3     Running   0          2m47s   hippo-instance1-wm5p   replica
-```
-
-In our example cluster, `hippo-instance1-jdb5` is currently the primary, meaning we want to target
-`hippo-instance1-wm5p` in the switchover. Now that you know which instance is currently the
-primary and how to find your `targetInstance`, let's update your cluster spec:
-
-```yaml
-spec:
-  patroni:
-    switchover:
-      enabled: true
-      targetInstance: hippo-instance1-wm5p
-```
-
-After applying this change you will once again need to trigger the switchover by annotating the
-PostgresCluster (see above commands). You can verify the switchover has completed by checking the
-Pod role labels and `status.patroni.switchover`.
-
-#### Failover
-
-Finally, we have the option to fail over when your cluster has entered an unhealthy state. The
-only spec change necessary to accomplish this is updating the `spec.patroni.switchover.type`
-field to the `Failover` type. Note that a `targetInstance` is required when
-performing a failover. Based on the example cluster above, assuming `hippo-instance1-wm5p` is still
-a replica, we can update the spec:
-
-```yaml
-spec:
-  patroni:
-    switchover:
-      enabled: true
-      targetInstance: hippo-instance1-wm5p
-      type: Failover
-```
-
-Apply this spec change and your PostgresCluster will be prepared to perform the failover. Again
-you will need to trigger the switchover by annotating the PostgresCluster (see above commands)
-and verify that the Pod role labels and `status.patroni.switchover` are updated accordingly.
-
-{{% notice warning %}}
-Errors encountered in the switchover process can leave your cluster in a bad
-state. If you encounter issues, which can be found in the operator logs, you can update the spec to fix the
-issues and apply the change. Once the change has been applied, PGO will attempt to perform the
-switchover again.
-{{% /notice %}}
-
-## Next Steps
-
-We've covered a lot in terms of building, maintaining, scaling, customizing, restarting, and expanding our Postgres cluster. However, there may come a time where we need to [delete our Postgres cluster]({{< relref "delete-cluster.md" >}}). How do we do that?
diff --git a/docs/content/tutorial/backup-management.md b/docs/content/tutorial/backup-management.md
deleted file mode 100644
index 176c4fd435..0000000000
--- a/docs/content/tutorial/backup-management.md
+++ /dev/null
@@ -1,127 +0,0 @@
----
-title: "Backup Management"
-date:
-draft: false
-weight: 82
----
-
-In the [previous section]({{< relref "./backups.md" >}}), we looked at a brief overview of the full disaster recovery feature set that PGO provides and explored how to [configure backups for our Postgres cluster]({{< relref "./backups.md" >}}).
-
-Now that we have backups set up, let's look at some of the various backup management tasks we can perform. These include:
-
-- Setting up scheduled backups
-- Setting backup retention policies
-- Taking one-off / ad hoc backups
-
-## Managing Scheduled Backups
-
-PGO sets up your Postgres clusters so that they are continuously archiving the [write-ahead log](https://www.postgresql.org/docs/current/wal-intro.html):
-your data is constantly being stored in your backup repository. Effectively, this is a backup!
-
-However, in a [disaster recovery]({{< relref "./disaster-recovery.md" >}}) scenario, you likely want to get your Postgres cluster back up and running as quickly as possible (e.g. a short "[recovery time objective (RTO)](https://en.wikipedia.org/wiki/Disaster_recovery#Recovery_Time_Objective)"). What helps accomplish this is taking periodic backups, which makes it faster to restore!
-
-[pgBackRest](https://pgbackrest.org/), the backup management tool used by PGO, provides different backup types to help both from a space management and RTO optimization perspective. These backup types include:
-
-- `full`: A backup of your entire Postgres cluster. This is the largest of all of the backup types.
-- `differential`: A backup of all of the data since the last `full` backup.
-- `incremental`: A backup of all of the data since the last `full`, `differential`, or `incremental` backup.
-
-Selecting the appropriate backup strategy for your Postgres cluster is outside the scope of this tutorial, but let's look at how we can set up scheduled backups.
-
-Backup schedules are stored in the `spec.backups.pgbackrest.repos.schedules` section. Each value in this section
-accepts a [cron-formatted](https://docs.k8s.io/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax) string
-that dictates the backup schedule.
-
-Let's say that our backup policy is to take a full backup weekly on Sunday at 1am and take differential backups daily at 1am on every day except Sunday.
-We would want to add configuration to our spec that looks similar to:
-
-```
-spec:
-  backups:
-    pgbackrest:
-      repos:
-      - name: repo1
-        schedules:
-          full: "0 1 * * 0"
-          differential: "0 1 * * 1-6"
-```
-
-To manage scheduled backups, PGO will create several Kubernetes [CronJobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/)
-that will perform backups on the specified periods. The backups will use the [configuration that you specified]({{< relref "./backups.md" >}}).
-
-Ensuring you take regularly scheduled backups is important to maintaining Postgres cluster health.
-However, you don't need to keep all of your backups: this could cause you to run out of space!
-As such, it's also important to set a backup retention policy.
-
-## Managing Backup Retention
-
-PGO lets you set backup retention on full and differential backups. When a full backup expires,
-either through your retention policy or through manual expiration, pgBackRest will clean up any
-backup and WAL files associated with it. For example, if you have a full backup with four associated
-incremental backups, when the full backup expires, all of its incremental backups also expire.
-
-There are two different types of backup retention you can set:
-
-- `count`: This is based on the number of backups you want to keep. This is the default.
-- `time`: This is based on the total number of days you would like to keep a backup.
-
-Let's look at an example where we keep full backups for 14 days. The most convenient way to do this
-is through the `spec.backups.pgbackrest.global` section:
-
-```
-spec:
-  backups:
-    pgbackrest:
-      global:
-        repo1-retention-full: "14"
-        repo1-retention-full-type: time
-```
-
-The full list of available configuration options is in the [pgBackRest configuration](https://pgbackrest.org/configuration.html) guide.
-
-## Taking a One-Off Backup
-
-There are times when you may want to take a one-off backup, such as before major application changes
-or updates. This is not your typical declarative action -- in fact, a one-off backup is imperative
-in nature! -- but it is possible to take a one-off backup of your Postgres cluster with PGO.
-
-First, you need to configure the `spec.backups.pgbackrest.manual` section to be able to take a one-off backup.
-This contains information about the type of backup you want to take and any other [pgBackRest configuration](https://pgbackrest.org/configuration.html) options.
-
-Let's configure the custom resource to take a one-off full backup:
-
-```
-spec:
-  backups:
-    pgbackrest:
-      manual:
-        repoName: repo1
-        options:
-        - --type=full
-```
-
-This does not trigger the one-off backup -- you have to do that by adding the
-`postgres-operator.crunchydata.com/pgbackrest-backup` annotation to your custom resource.
-The best way to set this annotation is with a timestamp, so you know when you initialized the backup.
-
-For example, for our `hippo` cluster, we can run the following command to trigger the one-off backup:
-
-```shell
-kubectl annotate -n postgres-operator postgrescluster hippo \
-  postgres-operator.crunchydata.com/pgbackrest-backup="$(date)"
-```
-
-PGO will detect this annotation and create a new, one-off backup Job!
-
-If you intend to take one-off backups with similar settings in the future, you can leave those in the spec; just update the annotation to a different value the next time you are taking a backup.
-
-To re-run the command above, you will need to add the `--overwrite` flag so the annotation's value can be updated, i.e.
-
-```shell
-kubectl annotate -n postgres-operator postgrescluster hippo --overwrite \
-  postgres-operator.crunchydata.com/pgbackrest-backup="$(date)"
-```
-
-## Next Steps
-
-We've covered the fundamental tasks with managing backups. What about [restores]({{< relref "./disaster-recovery.md" >}})? Or [cloning data into new Postgres clusters]({{< relref "./disaster-recovery.md" >}})? Let's explore!
diff --git a/docs/content/tutorial/backups.md b/docs/content/tutorial/backups.md
deleted file mode 100644
index 0138cd1706..0000000000
--- a/docs/content/tutorial/backups.md
+++ /dev/null
@@ -1,397 +0,0 @@
----
-title: "Backup Configuration"
-date:
-draft: false
-weight: 80
----
-
-An important part of a healthy Postgres cluster is maintaining backups. PGO optimizes its use of open source [pgBackRest](https://pgbackrest.org/) to be able to support terabyte size databases. What's more, PGO makes it convenient to perform many common and advanced actions that can occur during the lifecycle of a database, including:
-
-- Setting automatic backup schedules and retention policies
-- Backing data up to multiple locations
-  - Support for backup storage in Kubernetes, AWS S3 (or S3-compatible systems like MinIO), Google Cloud Storage (GCS), and Azure Blob Storage
-- Taking one-off / ad hoc backups
-- Performing a "point-in-time-recovery"
-- Cloning data to a new instance
-
-and more.
-
-Let's explore the various disaster recovery features in PGO by first looking at how to set up backups.
-
-## Understanding Backup Configuration and Basic Operations
-
-The backup configuration for a PGO managed Postgres cluster resides in the
-`spec.backups.pgbackrest` section of a custom resource. In addition to indicating which
-version of pgBackRest to use, this section allows you to configure the fundamental
-backup settings for your Postgres cluster, including:
-
-- `spec.backups.pgbackrest.configuration` - allows you to add additional configuration and references to Secrets that are needed for configuring your backups. For example, this may reference a Secret that contains your S3 credentials.
-- `spec.backups.pgbackrest.global` - a convenience to apply global [pgBackRest configuration](https://pgbackrest.org/configuration.html). An example of this may be setting the global pgBackRest logging level (e.g. `log-level-console: info`), or providing configuration to optimize performance.
-- `spec.backups.pgbackrest.repos` - information on each specific pgBackRest backup repository.
-  This allows you to configure where and how your backups and WAL archive are stored.
-  You can keep backups in up to four (4) different locations!
-
-You can configure the `repos` section based on the backup storage system you are looking to use. Specifically, you configure your `repos` section according to the storage type you are using. There are four storage types available in `spec.backups.pgbackrest.repos`:
-
-| Storage Type | Description |
-|--------------|-------------|
-| `azure` | For use with Azure Blob Storage. |
-| `gcs` | For use with Google Cloud Storage (GCS). |
-| `s3` | For use with Amazon S3 or any S3 compatible storage system such as MinIO. |
-| `volume` | For use with a Kubernetes [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). |
-
-
-Regardless of the backup storage system you select, you **must** assign a name to `spec.backups.pgbackrest.repos.name`, e.g. `repo1`. pgBackRest follows the convention of assigning configuration to a specific repository using a `repoN` format, e.g. `repo1`, `repo2`, etc. You can customize your configuration based upon the name that you assign in the spec. We will cover this topic further in the multi-repository example.
-
-By default, backups are stored in a directory that follows the pattern `pgbackrest/repoN` where `N` is the number of the repo. This typically does not present issues when storing your backup information in a Kubernetes volume, but it can present complications if you are storing all of your backups in the same bucket in a blob storage system like S3/GCS/Azure. You can avoid conflicts by setting the `repoN-path` variable in `spec.backups.pgbackrest.global`. The convention we recommend for setting this variable is `/pgbackrest/$NAMESPACE/$CLUSTER_NAME/repoN`. For example, if I have a cluster named `hippo` in the namespace `postgres-operator`, I would set the following:
-
-```
-spec:
-  backups:
-    pgbackrest:
-      global:
-        repo1-path: /pgbackrest/postgres-operator/hippo/repo1
-```
-
-As mentioned earlier, you can store backups in up to four different repositories. You can also mix and match, e.g. you could store your backups in two different S3 repositories. Each storage type does have its own required attributes that you need to set. We will cover that later in this section.
-
-Now that we've covered the basics, let's learn how to set up our backup repositories!
-
-## Setting Up a Backup Repository
-
-As mentioned above, PGO, the Postgres Operator from Crunchy Data, supports multiple ways to store backups. Let's look into each method and see how you can ensure your backups and archives are being safely stored!
-
-## Using Kubernetes Volumes
-
-The simplest way to get started storing backups is to use a Kubernetes Volume. This was already configured as part of the [create a Postgres cluster]({{< relref "./create-cluster.md">}}) example. Let's take a closer look at some of that configuration:
-
-```
-- name: repo1
-  volume:
-    volumeClaimSpec:
-      accessModes:
-      - "ReadWriteOnce"
-      resources:
-        requests:
-          storage: 1Gi
-```
-
-The one requirement of `volume` is that you need to fill out the `volumeClaimSpec` attribute. This attribute uses the same format as a [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) spec! In fact, we performed a similar set up when we [created a Postgres cluster]({{< relref "./create-cluster.md">}}).
-
-In the above example, we assume that the Kubernetes cluster is using a default storage class. If your cluster does not have a default storage class, or you wish to use a different storage class, you will have to set `spec.backups.pgbackrest.repos.volume.volumeClaimSpec.storageClassName`.
-
-## Using S3
-
-Setting up backups in S3 requires a few additional modifications to your custom resource spec
-and either
-- the use of a Secret to protect your S3 credentials, or
-- setting up identity providers in AWS to allow pgBackRest to assume a role with permissions.
-
-### Using S3 Credentials
-
-There is an example for creating a Postgres cluster that uses S3 for backups in the `kustomize/s3` directory in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository. In this directory, there is a file called `s3.conf.example`. Copy this example file to `s3.conf`:
-
-```
-cp s3.conf.example s3.conf
-```
-
-Note that `s3.conf` is protected from commit by a `.gitignore`.
-
-Open up `s3.conf`, and you will see something similar to:
-
-```
-[global]
-repo1-s3-key=
-repo1-s3-key-secret=
-```
-
-Replace the values with your AWS S3 credentials and save.
-
-Now, open up `kustomize/s3/postgres.yaml`. In the `s3` section, you will see something similar to:
-
-```
-s3:
-  bucket: ""
-  endpoint: ""
-  region: ""
-```
-
-Again, replace these values with the values that match your S3 configuration. For `endpoint`, only use the domain and, if necessary, the port (e.g. `s3.us-east-2.amazonaws.com`).
-
-Note that `region` is required by both S3 and pgBackRest. If you are using a storage system with an S3 compatibility layer that does not require `region`, you can fill in `region` with an arbitrary value.
-
-If you are using MinIO, you may need to set the URI style to use `path` mode. You can do this from the global settings, e.g. for `repo1`:
-
-```yaml
-spec:
-  backups:
-    pgbackrest:
-      global:
-        repo1-s3-uri-style: path
-```
-
-When your configuration is saved, you can deploy your cluster:
-
-```
-kubectl apply -k kustomize/s3
-```
-
-Watch your cluster: you will see that your backups and archives are now being stored in S3!
-
-### Using an AWS-integrated identity provider and role
-
-If you deploy PostgresClusters to AWS Elastic Kubernetes Service, you can take advantage of their
-IAM role integration. When you attach a certain annotation to your PostgresCluster spec, AWS will
-automatically mount an AWS token and other needed environment variables. These environment
-variables will then be used by pgBackRest to assume the identity of a role that has permissions
-to upload to an S3 repository.
-
-This method requires [additional setup in AWS IAM](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html).
-Use the procedure in the linked documentation for the first two steps described below:
-
-1. Create an OIDC provider for your EKS cluster.
-2. Create an IAM policy for bucket access and an IAM role with a trust relationship with the
-OIDC provider in step 1.
-
-The third step is to associate that IAM role with a ServiceAccount, but there's no need to
-do that manually, as PGO does that for you. First, make a note of the IAM role's `ARN`.
-
-You can then make the following changes to the files in the `kustomize/s3` directory in the
-[Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository:
-
-1\. Add the `s3` section to the spec in `kustomize/s3/postgres.yaml` as discussed in the
-[Using S3 Credentials](#using-s3-credentials) section above. In addition to that, add the required `eks.amazonaws.com/role-arn`
-annotation to the PostgresCluster spec using the IAM `ARN` that you noted above.
-
-For instance, given an IAM role with the ARN `arn:aws:iam::123456768901:role/allow_bucket_access`,
-you would add the following to the PostgresCluster spec:
-
-```
-spec:
-  metadata:
-    annotations:
-      eks.amazonaws.com/role-arn: "arn:aws:iam::123456768901:role/allow_bucket_access"
-```
-
-That `annotations` field will get propagated to the ServiceAccounts that require it automatically.
-
-2\. Copy the `s3.conf.example` file to `s3.conf`:
-
-```
-cp s3.conf.example s3.conf
-```
-
-Update that `kustomize/s3/s3.conf` file so that it looks like this:
-
-```
-[global]
-repo1-s3-key-type=web-id
-```
-
-That `repo1-s3-key-type=web-id` line will tell
-[pgBackRest](https://pgbackrest.org/configuration.html#section-repository/option-repo-s3-key-type)
-to use the IAM integration.
-
-With those changes saved, you can deploy your cluster:
-
-```
-kubectl apply -k kustomize/s3
-```
-
-And watch as it spins up and backs up to S3 using pgBackRest's IAM integration.
-
-## Using Google Cloud Storage (GCS)
-
-Similar to S3, setting up backups in Google Cloud Storage (GCS) requires a few additional modifications to your custom resource spec and the use of a Secret to protect your GCS credentials.
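-
-The repository definition itself is small. As a sketch (the bucket name is illustrative):
-
-```yaml
-spec:
-  backups:
-    pgbackrest:
-      repos:
-      - name: repo1
-        gcs:
-          bucket: "my-gcs-bucket"
-```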
- -There is an example for creating a Postgres cluster that uses GCS for backups in the `kustomize/gcs` directory in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository. In order to configure this example to use GCS for backups, you will need to do two things. - -First, copy your GCS key secret (which is a JSON file) into `kustomize/gcs/gcs-key.json`. Note that a `.gitignore` directive prevents you from committing this file. - -Next, open the `postgres.yaml` file and edit `spec.backups.pgbackrest.repos.gcs.bucket` to the name of the GCS bucket that you want to back up to. - -Save this file, and then run: - -``` -kubectl apply -k kustomize/gcs -``` - -Watch your cluster: you will see that your backups and archives are now being stored in GCS! - -## Using Azure Blob Storage - -Similar to the above, setting up backups in Azure Blob Storage requires a few additional modifications to your custom resource spec and the use of a Secret to protect your Azure Storage credentials. - -There is an example for creating a Postgres cluster that uses Azure for backups in the `kustomize/azure` directory in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository. In this directory, there is a file called `azure.conf.example`. Copy this example file to `azure.conf`: - -``` -cp azure.conf.example azure.conf -``` - -Note that `azure.conf` is protected from commit by a `.gitignore`. - -Open up `azure.conf`; you will see something similar to: - -``` -[global] -repo1-azure-account= -repo1-azure-key= -``` - -Replace the values with your Azure credentials and save. - -Now, open up `kustomize/azure/postgres.yaml`. In the `azure` section, you will see something similar to: - -``` -azure: - container: "" -``` - -Again, replace this value with the container name that matches your Azure configuration. - -When your configuration is saved, you can deploy your cluster: - -``` -kubectl apply -k kustomize/azure -``` - -Watch your cluster: you will see that your backups and archives are now being stored in Azure! - -## Set Up Multiple Backup Repositories - -It is possible to store backups in multiple locations! For example, you may want to keep your backups both within your Kubernetes cluster and in S3. There are many reasons for doing this: - -- It is typically faster to heal Postgres instances when your backups are closer -- You can set different backup retention policies based upon your available storage -- You want to ensure that your backups are distributed geographically - -and more. - -PGO lets you store your backups in up to four locations simultaneously. You can mix and match: for example, you can store backups both locally and in GCS, or store your backups in two different GCS repositories. It's up to you! - -There is an example in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository in the `kustomize/multi-backup-repo` folder that sets up backups in four different locations using each storage type. You can modify this example to match your desired backup topology. - -### Additional Notes - -While storing Postgres archives (write-ahead log [WAL] files) occurs in parallel when saving data to multiple pgBackRest repos, you cannot take parallel backups to different repos at the same time. PGO will ensure that all backups are taken serially. Future work in pgBackRest will address parallel backups to different repos.
Please don't confuse this with parallel backup: pgBackRest does allow for backups to use parallel processes when storing them to a single repo! - -## Encryption - -You can encrypt your backups with AES-256 encryption in CBC mode. This can be used independently of any encryption that may be supported by an external backup system. - -To encrypt your backups, you need to set the cipher type and provide a passphrase. The passphrase should be long and random (e.g. the pgBackRest documentation recommends `openssl rand -base64 48`). The passphrase should be kept in a Secret. - -Let's use our `hippo` cluster as an example. Create a new directory; in it, create a file called `pgbackrest-secrets.conf`. It should look something like this: - -``` -[global] -repo1-cipher-pass=your-super-secure-encryption-key-passphrase -``` - -This contains the passphrase used to encrypt your data. - -Next, create a `kustomization.yaml` file that looks like this: - -```yaml -namespace: postgres-operator - -secretGenerator: -- name: hippo-pgbackrest-secrets - files: - - pgbackrest-secrets.conf - -generatorOptions: - disableNameSuffixHash: true - -resources: -- postgres.yaml -``` - -Finally, create the manifest for the Postgres cluster in a file named `postgres.yaml` that is similar to the following: - -```yaml -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: {{< param imageCrunchyPostgres >}} - postgresVersion: {{< param postgresVersion >}} - instances: - - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: {{< param imageCrunchyPGBackrest >}} - configuration: - - secret: - name: hippo-pgbackrest-secrets - global: - repo1-cipher-type: aes-256-cbc - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -Notice the reference to the Secret that contains the encryption key: - -```yaml -spec: - backups: - pgbackrest: - configuration: - - secret: - name: hippo-pgbackrest-secrets -``` - -as well as the configuration for enabling AES-256 encryption in CBC mode: - -```yaml -spec: - backups: - pgbackrest: - global: - repo1-cipher-type: aes-256-cbc -``` - -You can now create a Postgres cluster that has encrypted backups! - -### Limitations - -Currently, the encryption settings cannot be changed on backups after they are established. - -## Custom Backup Configuration - -Most of your backup settings can be configured through the `spec.backups.pgbackrest.global` attribute, or through information that you supply in the ConfigMap or Secret that you refer to in `spec.backups.pgbackrest.configuration`. You can also provide additional Secret values if need be, e.g. `repo1-cipher-pass` for encrypting backups. - -The full list of [pgBackRest configuration options](https://pgbackrest.org/configuration.html) is available here: - -[https://pgbackrest.org/configuration.html](https://pgbackrest.org/configuration.html) - -## IPv6 Support - -If you are running your cluster in an IPv6-only environment, you will need to add an annotation to your PostgresCluster so that PGO knows to set pgBackRest's `tls-server-address` to an IPv6 address. Otherwise, `tls-server-address` will be set to `0.0.0.0`, making pgBackRest inaccessible, and backups will not run.
The annotation should be added as shown below: - -```yaml -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo - annotations: - postgres-operator.crunchydata.com/pgbackrest-ip-version: IPv6 -``` - -## Next Steps - -We've now seen how to use PGO to get our backups and archives set up and safely stored. Now let's take a look at [backup management]({{< relref "./backup-management.md" >}}) and how we can do things such as set backup frequency, set retention policies, and even take one-off backups! diff --git a/docs/content/tutorial/connect-cluster.md b/docs/content/tutorial/connect-cluster.md deleted file mode 100644 index 513bf207b7..0000000000 --- a/docs/content/tutorial/connect-cluster.md +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: "Connect to a Postgres Cluster" -date: -draft: false -weight: 30 ---- - -It's one thing to [create a Postgres cluster]({{< relref "./create-cluster.md" >}}); it's another thing to connect to it. Let's explore how PGO makes it possible to connect to a Postgres cluster! - -## Background: Services, Secrets, and TLS - -PGO creates a series of Kubernetes [Services](https://kubernetes.io/docs/concepts/services-networking/service/) to provide stable endpoints for connecting to your Postgres databases. These endpoints give your application a consistent way to maintain connectivity to your data. To inspect what services are available, you can run the following command: - -``` -kubectl -n postgres-operator get svc --selector=postgres-operator.crunchydata.com/cluster=hippo -``` - -which will yield something similar to: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -hippo-ha ClusterIP 10.103.73.92 <none> 5432/TCP 3h14m -hippo-ha-config ClusterIP None <none> <none> 3h14m -hippo-pods ClusterIP None <none> <none> 3h14m -hippo-primary ClusterIP None <none> 5432/TCP 3h14m -hippo-replicas ClusterIP 10.98.110.215 <none> 5432/TCP 3h14m -``` - -You do not need to worry about most of these Services, as they are used to help manage the overall health of your Postgres cluster. For the purposes of connecting to your database, the Service of interest is called `hippo-primary`. Thanks to PGO, you do not even need to worry about that, as that information is captured within a Secret! - -When your Postgres cluster is initialized, PGO will bootstrap a database and Postgres user that your application can access. This information is stored in a Secret named with the pattern `<clusterName>-pguser-<userName>`. For our `hippo` cluster, this Secret is called `hippo-pguser-hippo`. This Secret contains the information you need to connect your application to your Postgres database: - -- `user`: The name of the user account. -- `password`: The password for the user account. -- `dbname`: The name of the database that the user has access to by default. -- `host`: The name of the host of the database. - This references the [Service](https://kubernetes.io/docs/concepts/services-networking/service/) of the primary Postgres instance. -- `port`: The port that the database is listening on. -- `uri`: A [PostgreSQL connection URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) - that provides all the information for logging into the Postgres database. -- `jdbc-uri`: A [PostgreSQL JDBC connection URI](https://jdbc.postgresql.org/documentation/use/) that provides - all the information for logging into the Postgres database via the JDBC driver. - -All connections are over TLS.
PGO provides its own certificate authority (CA) to allow you to securely connect your applications to your Postgres clusters. This allows you to use the [`verify-full` "SSL mode"](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS) of Postgres, which provides eavesdropping protection and prevents MITM attacks. You can also choose to bring your own CA, which is described later in this tutorial in the [Customize Cluster]({{< relref "./customize-cluster.md" >}}) section. - -### Modifying Service Type, NodePort Value and Metadata - -By default, PGO deploys Services with the `ClusterIP` Service type. Based on how you want to expose your database, -you may want to modify the Services to use a different -[Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) -and [NodePort value](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport). - -You can modify the Services that PGO manages from the following attributes: - -- `spec.service` - this manages the Service for connecting to a Postgres primary. -- `spec.proxy.pgBouncer.service` - this manages the Service for connecting to the PgBouncer connection pooler. -- `spec.userInterface.pgAdmin.service` - this manages the Service for connecting to the pgAdmin management tool. - -For example, say you want the Postgres primary to use a `NodePort` service with a specific `nodePort` value, and set -a specific annotation and label. You would add the following to your manifest: - -```yaml -spec: - service: - metadata: - annotations: - my-annotation: value1 - labels: - my-label: value2 - type: NodePort - nodePort: 32000 -``` - -For our `hippo` cluster, you would see the Service type and nodePort modification as well as the annotation and label. -For example: - -``` -kubectl -n postgres-operator get svc --selector=postgres-operator.crunchydata.com/cluster=hippo -``` - -will yield something similar to: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -hippo-ha NodePort 10.105.57.191 <none> 5432:32000/TCP 48s -hippo-ha-config ClusterIP None <none> <none> 48s -hippo-pods ClusterIP None <none> <none> 48s -hippo-primary ClusterIP None <none> 5432/TCP 48s -hippo-replicas ClusterIP 10.106.18.99 <none> 5432/TCP 48s -``` - -and the top of the output from running - -``` -kubectl -n postgres-operator describe svc hippo-ha -``` - -will show our custom annotation and label have been added: - -``` -Name: hippo-ha -Namespace: postgres-operator -Labels: my-label=value2 - postgres-operator.crunchydata.com/cluster=hippo - postgres-operator.crunchydata.com/patroni=hippo-ha -Annotations: my-annotation: value1 -``` - -Note that setting the `nodePort` value is not allowed when using the (default) `ClusterIP` type, and it must be in-range -and not otherwise in use or the operation will fail. Additionally, be aware that any annotations or labels provided here -will take precedence over any conflicting annotations or labels a user configures elsewhere. - -Finally, if you are exposing your Services externally and are relying on TLS -verification, you will need to use the [custom TLS]({{< relref "tutorial/customize-cluster.md" >}}#customize-tls) -features of PGO. - -## Connect an Application - -For this tutorial, we are going to connect [Keycloak](https://www.keycloak.org/), an open source -identity management application. Keycloak can be deployed on Kubernetes and is backed by a Postgres -database.
While we provide an [example of deploying Keycloak and a PostgresCluster](https://github.com/CrunchyData/postgres-operator-examples/tree/main/kustomize/keycloak) -in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples) -repository, the manifest below deploys it using our `hippo` cluster that is already running: - -``` -kubectl apply --filename=- <<EOF -... -EOF -``` - diff --git a/docs/content/tutorial/connection-pooling.md b/docs/content/tutorial/connection-pooling.md deleted file mode 100644 index ff9130374e..0000000000 --- a/docs/content/tutorial/connection-pooling.md +++ /dev/null @@ -1,239 +0,0 @@ ---- -title: "Connection Pooling" -date: -draft: false -weight: 100 ---- - -Connection pooling can be helpful for scaling and maintaining overall availability between your application and the database. PGO helps facilitate this by supporting the [PgBouncer](https://www.pgbouncer.org/) connection pooler and state manager. - -Let's look at how we can add a connection pooler and connect it to our application! - -## Adding a Connection Pooler - -Let's look at how we can add a connection pooler using the `kustomize/keycloak` example in the [Postgres Operator examples](https://github.com/CrunchyData/postgres-operator-examples/fork) repository. - -Connection poolers are added using the `spec.proxy` section of the custom resource. Currently, the only connection pooler supported is [PgBouncer](https://www.pgbouncer.org/). - -The only required attribute for adding a PgBouncer connection pooler is to set the `spec.proxy.pgBouncer.image` attribute. In the `kustomize/keycloak/postgres.yaml` file, add the following YAML to the spec: - -``` -proxy: - pgBouncer: - image: {{< param imageCrunchyPGBouncer >}} -``` - -(You can also find an example of this in the `kustomize/examples/high-availability` example). - -Save your changes and run: - -``` -kubectl apply -k kustomize/keycloak -``` - -PGO will detect the change and create a new PgBouncer Deployment! - -That was fairly easy to set up, so now let's look at how we can connect our application to the connection pooler. - -## Connecting to a Connection Pooler - -When a connection pooler is deployed to the cluster, PGO adds additional information to the user Secrets to allow applications to connect directly to the connection pooler. Recall that in this example, our user Secret is called `keycloakdb-pguser-keycloakdb`. Describe the user Secret: - -``` -kubectl -n postgres-operator describe secrets keycloakdb-pguser-keycloakdb -``` - -You should see that there are several new attributes included in this Secret that allow you to connect to your Postgres instance via the connection pooler: - -- `pgbouncer-host`: The name of the host of the PgBouncer connection pooler. - This references the [Service](https://kubernetes.io/docs/concepts/services-networking/service/) of the PgBouncer connection pooler. -- `pgbouncer-port`: The port that the PgBouncer connection pooler is listening on. -- `pgbouncer-uri`: A [PostgreSQL connection URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) - that provides all the information for logging into the Postgres database via the PgBouncer connection pooler. -- `pgbouncer-jdbc-uri`: A [PostgreSQL JDBC connection URI](https://jdbc.postgresql.org/documentation/use/) that provides - all the information for logging into the Postgres database via the PgBouncer connection pooler using the JDBC driver.
- Note that by default, the connection string disables JDBC's management of prepared transactions for - [optimal use with PgBouncer](https://www.pgbouncer.org/faq.html#how-to-use-prepared-statements-with-transaction-pooling). - -Open up the `kustomize/keycloak/keycloak.yaml` file. Update the `DB_ADDR` and `DB_PORT` values to be the following: - -``` -- name: DB_ADDR - valueFrom: { secretKeyRef: { name: keycloakdb-pguser-keycloakdb, key: pgbouncer-host } } -- name: DB_PORT - valueFrom: { secretKeyRef: { name: keycloakdb-pguser-keycloakdb, key: pgbouncer-port } } -``` - -This changes Keycloak's configuration so that it will now connect through the connection pooler. - -Apply the changes: - -``` -kubectl apply -k kustomize/keycloak -``` - -Kubernetes will detect the changes and begin to deploy a new Keycloak Pod. When it is completed, Keycloak will be connected to Postgres via the PgBouncer connection pooler! - -## TLS - -PGO deploys every cluster and component over TLS. This includes the PgBouncer connection pooler. If you are using your own [custom TLS setup]({{< relref "./customize-cluster.md" >}}#customize-tls), you will need to provide a Secret reference for a TLS key / certificate pair for PgBouncer in `spec.proxy.pgBouncer.customTLSSecret`. - -Your TLS certificate for PgBouncer should have a Common Name (CN) setting that matches the PgBouncer Service name. This is the name of the cluster suffixed with `-pgbouncer`. For example, for our `hippo` cluster this would be `hippo-pgbouncer`. For the `keycloakdb` example, it would be `keycloakdb-pgbouncer`. - -To customize the TLS for PgBouncer, you will need to create a Secret in the Namespace of your Postgres cluster that contains the TLS key (`tls.key`), TLS certificate (`tls.crt`) and the CA certificate (`ca.crt`) to use. The Secret should contain the following values: - -``` -data: - ca.crt: <value> - tls.crt: <value> - tls.key: <value> -``` - -For example, if you have files named `ca.crt`, `keycloakdb-pgbouncer.key`, and `keycloakdb-pgbouncer.crt` stored on your local machine, you could run the following command: - -``` -kubectl create secret generic -n postgres-operator keycloakdb-pgbouncer.tls \ - --from-file=ca.crt=ca.crt \ - --from-file=tls.key=keycloakdb-pgbouncer.key \ - --from-file=tls.crt=keycloakdb-pgbouncer.crt -``` - -You can specify the custom TLS Secret in the `spec.proxy.pgBouncer.customTLSSecret.name` field in your `postgrescluster.postgres-operator.crunchydata.com` custom resource, e.g.: - -``` -spec: - proxy: - pgBouncer: - customTLSSecret: - name: keycloakdb-pgbouncer.tls -``` - -## Customizing - -The PgBouncer connection pooler is highly customizable, both from a configuration and Kubernetes deployment standpoint. Let's explore some of the customizations that you can do! - -### Configuration - -[PgBouncer configuration](https://www.pgbouncer.org/config.html) can be customized through `spec.proxy.pgBouncer.config`. After making configuration changes, PGO will roll them out to any PgBouncer instance and automatically issue a "reload". - -There are several ways you can customize the configuration: - -- `spec.proxy.pgBouncer.config.global`: Accepts key-value pairs that apply changes globally to PgBouncer. -- `spec.proxy.pgBouncer.config.databases`: Accepts key-value pairs that represent PgBouncer [database definitions](https://www.pgbouncer.org/config.html#section-databases).
-- `spec.proxy.pgBouncer.config.users`: Accepts key-value pairs that represent [connection settings applied to specific users](https://www.pgbouncer.org/config.html#section-users). -- `spec.proxy.pgBouncer.config.files`: Accepts a list of files that are mounted in the `/etc/pgbouncer` directory and loaded, via PgBouncer's [include directive](https://www.pgbouncer.org/config.html#include-directive), before any other options are considered. - -For example, to set the connection pool mode to `transaction`, you would set the following configuration: - -``` -spec: - proxy: - pgBouncer: - config: - global: - pool_mode: transaction -``` - -For a reference on [PgBouncer configuration](https://www.pgbouncer.org/config.html), please see: - -[https://www.pgbouncer.org/config.html](https://www.pgbouncer.org/config.html) - -### Replicas - -PGO deploys one PgBouncer instance by default. You may want to run multiple PgBouncer instances to have some level of redundancy, though you still want to be mindful of how many connections are going to your Postgres database! - -You can manage the number of PgBouncer instances that are deployed through the `spec.proxy.pgBouncer.replicas` attribute. - -### Resources - -You can manage the CPU and memory resources given to a PgBouncer instance through the `spec.proxy.pgBouncer.resources` attribute. The layout of `spec.proxy.pgBouncer.resources` should be familiar: it follows the same pattern as the standard Kubernetes structure for setting [container resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). - -For example, let's say we want to set some CPU and memory limits on our PgBouncer instances. We could add the following configuration: - -``` -spec: - proxy: - pgBouncer: - resources: - limits: - cpu: 200m - memory: 128Mi -``` - -As PGO deploys the PgBouncer instances using a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), these changes are rolled out using a rolling update to minimize disruption between your application and Postgres instances! - -### Annotations / Labels - -You can apply custom annotations and labels to your PgBouncer instances through the `spec.proxy.pgBouncer.metadata.annotations` and `spec.proxy.pgBouncer.metadata.labels` attributes, respectively. Note that any changes to either of these two attributes take precedence over any other custom labels you have added. - -### Pod Anti-Affinity / Pod Affinity / Node Affinity - -You can control the [pod anti-affinity, pod affinity, and node affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) through the `spec.proxy.pgBouncer.affinity` attribute, specifically: - -- `spec.proxy.pgBouncer.affinity.nodeAffinity`: controls node affinity for the PgBouncer instances. -- `spec.proxy.pgBouncer.affinity.podAffinity`: controls Pod affinity for the PgBouncer instances. -- `spec.proxy.pgBouncer.affinity.podAntiAffinity`: controls Pod anti-affinity for the PgBouncer instances. - -Each of the above follows the [standard Kubernetes specification for setting affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity).
- -For example, to set a preferred Pod anti-affinity rule for the `kustomize/keycloak` example, you would want to add the following to your configuration: - -``` -spec: - proxy: - pgBouncer: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - labelSelector: - matchLabels: - postgres-operator.crunchydata.com/cluster: keycloakdb - postgres-operator.crunchydata.com/role: pgbouncer - topologyKey: kubernetes.io/hostname -``` - -### Tolerations - -You can deploy PgBouncer instances to [Nodes with Taints](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) by setting [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) through `spec.proxy.pgBouncer.tolerations`. This attribute follows the Kubernetes standard tolerations layout. - -For example, if there were a set of Nodes with a Taint of `role=connection-poolers:NoSchedule` that you want to schedule your PgBouncer instances to, you could apply the following configuration: - -``` -spec: - proxy: - pgBouncer: - tolerations: - - effect: NoSchedule - key: role - operator: Equal - value: connection-poolers -``` - -Note that setting a toleration does not necessarily mean that the PgBouncer instances will be assigned to Nodes with those taints. [Tolerations act as a **key**: they allow you to access Nodes](https://blog.crunchydata.com/blog/kubernetes-pod-tolerations-and-postgresql-deployment-strategies). If you want to ensure that your PgBouncer instances are deployed to specific nodes, you need to combine setting tolerations with node affinity. - -### Pod Spread Constraints - -Besides using affinity, anti-affinity, and tolerations, you can also set [Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) through `spec.proxy.pgBouncer.topologySpreadConstraints`. This attribute follows the Kubernetes standard topology spread constraint layout. - -For example, since each of our pgBouncer Pods will have the standard `postgres-operator.crunchydata.com/role: pgbouncer` Label set, we can use this Label when determining the `maxSkew`. In the example below, since we have 3 nodes with a `maxSkew` of 1 and we've set `whenUnsatisfiable` to `ScheduleAnyway`, we should ideally see 1 Pod on each of the nodes, but our Pods can be distributed less evenly if other constraints keep this from happening. - -``` - proxy: - pgBouncer: - replicas: 3 - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: my-node-label - whenUnsatisfiable: ScheduleAnyway - labelSelector: - matchLabels: - postgres-operator.crunchydata.com/role: pgbouncer -``` - -If you want to ensure that your PgBouncer instances are deployed more evenly (or not deployed at all), you need to update `whenUnsatisfiable` to `DoNotSchedule`. - -## Next Steps - -Now that we can enable connection pooling in a cluster, let’s explore some [administrative tasks]({{< relref "administrative-tasks.md" >}}) such as manually restarting PostgreSQL using PGO. How do we do that? diff --git a/docs/content/tutorial/create-cluster.md b/docs/content/tutorial/create-cluster.md deleted file mode 100644 index 46674d3bfd..0000000000 --- a/docs/content/tutorial/create-cluster.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: "Create a Postgres Cluster" -date: -draft: false -weight: 20 ---- - -If you came here through the [quickstart]({{< relref "quickstart/_index.md" >}}), you may have already created a cluster.
If you created a cluster by using the example in the `kustomize/postgres` directory, feel free to skip to connecting to a cluster, or read onward for a more in-depth look into cluster creation! - -## Create a Postgres Cluster - -Creating a Postgres cluster is pretty simple. Using the example in the `kustomize/postgres` directory, all we have to do is run: - -``` -kubectl apply -k kustomize/postgres -``` - -and PGO will create a simple Postgres cluster named `hippo` in the `postgres-operator` namespace. You can track the status of your Postgres cluster using `kubectl describe` on the `postgresclusters.postgres-operator.crunchydata.com` custom resource: - -``` -kubectl -n postgres-operator describe postgresclusters.postgres-operator.crunchydata.com hippo -``` - -and you can track the state of the Postgres Pod using the following command: - -``` -kubectl -n postgres-operator get pods \ - --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance -``` - -### What Just Happened? - -PGO created a Postgres cluster based on the information provided to it in the Kustomize manifests located in the `kustomize/postgres` directory. Let's better understand what happened by inspecting the `kustomize/postgres/postgres.yaml` file: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: {{< param imageCrunchyPostgres >}} - postgresVersion: {{< param postgresVersion >}} - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: {{< param imageCrunchyPGBackrest >}} - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -When we ran the `kubectl apply` command earlier, what we did was create a `PostgresCluster` custom resource in Kubernetes. PGO detected that we added a new `PostgresCluster` resource and started to create all the objects needed to run Postgres in Kubernetes! - -What else happened? PGO read the value from `metadata.name` to provide the Postgres cluster with the name `hippo`. Additionally, PGO knew which containers to use for Postgres and pgBackRest by looking at the values in `spec.image` and `spec.backups.pgbackrest.image`, respectively. The value in `spec.postgresVersion` is important as it will help PGO track which major version of Postgres you are using. - -PGO knows how many Postgres instances to create through the `spec.instances` section of the manifest. While `name` is optional, we opted to give it the name `instance1`. We could have also created multiple replicas and instances during cluster initialization, but we will cover that more when we discuss how to [scale and create an HA Postgres cluster]({{< relref "./high-availability.md" >}}). - -A very important piece of your `PostgresCluster` custom resource is the `dataVolumeClaimSpec` section. This describes the storage that your Postgres instance will use. It is modeled after the [Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). If you do not provide a `spec.instances.dataVolumeClaimSpec.storageClassName`, then the default storage class in your Kubernetes environment is used. - -As part of creating a Postgres cluster, we also specify information about our backup archive. PGO uses [pgBackRest](https://pgbackrest.org/), an open source backup and restore tool designed to handle terabyte-scale backups.
As part of initializing our cluster, we can specify where we want our backups and archives ([write-ahead logs or WAL](https://www.postgresql.org/docs/current/wal-intro.html)) stored. We will talk about this portion of the `PostgresCluster` spec in greater depth in the [disaster recovery]({{< relref "./backups.md" >}}) section of this tutorial, and also see how we can store backups in Amazon S3, Google GCS, and Azure Blob Storage. - -## Troubleshooting - -### PostgreSQL / pgBackRest Pods Stuck in `Pending` Phase - -The most common cause of this is PVCs not being bound. Ensure that you have set up your storage options correctly in any `volumeClaimSpec`. You can always update your settings and reapply your changes with `kubectl apply`. - -Also ensure that you have enough persistent volumes available: your Kubernetes administrator may need to provision more. - -If you are on OpenShift, you may need to set `spec.openshift` to `true`. - - -## Next Steps - -We're up and running -- now let's [connect to our Postgres cluster]({{< relref "./connect-cluster.md" >}})! diff --git a/docs/content/tutorial/customize-cluster.md b/docs/content/tutorial/customize-cluster.md deleted file mode 100644 index d158e0160a..0000000000 --- a/docs/content/tutorial/customize-cluster.md +++ /dev/null @@ -1,467 +0,0 @@ ---- -title: "Customize a Postgres Cluster" -date: -draft: false -weight: 60 ---- - -Postgres is known for its ease of customization; PGO helps you to roll out changes efficiently and without disruption. After [resizing the resources]({{< relref "./resize-cluster.md" >}}) for our Postgres cluster in the previous step of this tutorial, let's see how we can tweak our Postgres configuration to optimize its usage of them. - -## Custom Postgres Configuration - -Part of the trick of managing multiple instances in a Postgres cluster is ensuring all of the configuration -changes are propagated to each of them. This is where PGO helps: when you make a Postgres configuration -change for a cluster, PGO will apply it to all of the Postgres instances. - -For example, in our previous step we added CPU and memory limits of `2.0` and `4Gi`, respectively. Let's tweak some of the Postgres settings to better use our new resources. We can do this in the `spec.patroni.dynamicConfiguration` section.
Here is an example updated manifest that tweaks several settings: - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: {{< param imageCrunchyPostgres >}} - postgresVersion: {{< param postgresVersion >}} - instances: - - name: instance1 - replicas: 2 - resources: - limits: - cpu: 2.0 - memory: 4Gi - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: {{< param imageCrunchyPGBackrest >}} - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - patroni: - dynamicConfiguration: - postgresql: - parameters: - max_parallel_workers: 2 - max_worker_processes: 2 - shared_buffers: 1GB - work_mem: 2MB -``` - -In particular, we added the following to `spec`: - -``` -patroni: - dynamicConfiguration: - postgresql: - parameters: - max_parallel_workers: 2 - max_worker_processes: 2 - shared_buffers: 1GB - work_mem: 2MB -``` - -Apply these updates to your Postgres cluster with the following command: - -``` -kubectl apply -k kustomize/postgres -``` - -PGO will apply these settings, restarting each Postgres instance when necessary. You can verify that the changes are present using the Postgres `SHOW` command, e.g. - -``` -SHOW work_mem; -``` - -should yield something similar to: - -``` - work_mem ----------- - 2MB -``` - -## Customize TLS - -All connections in PGO use TLS to encrypt communication between components. PGO sets up a PKI and certificate authority (CA) that allow you to create verifiable endpoints. However, you may want to bring a different TLS infrastructure based upon your organizational requirements. The good news: PGO lets you do this! - -If you want to use the TLS infrastructure that PGO provides, you can skip the rest of this section and move on to learning how to [apply software updates]({{< relref "./update-cluster.md" >}}). - -### How to Customize TLS - -There are a few different TLS endpoints that can be customized for PGO, including those of the Postgres cluster and controlling how Postgres instances authenticate with each other. Let's look at how we can customize TLS by defining - -* a `spec.customTLSSecret`, used to both identify the cluster and encrypt communications; and -* a `spec.customReplicationTLSSecret`, used for replication authentication. - -(For more information on the `spec.customTLSSecret` and `spec.customReplicationTLSSecret` fields, see the [`PostgresCluster CRD`]({{< relref "references/crd.md" >}}).) - -To customize the TLS for a Postgres cluster, you will need to create two Secrets in the Namespace of your Postgres cluster. One of these Secrets will be the `customTLSSecret` and the other will be the `customReplicationTLSSecret`. Both secrets contain a TLS key (`tls.key`), TLS certificate (`tls.crt`) and CA certificate (`ca.crt`) to use. - -Note: If `spec.customTLSSecret` is provided, you **must** also provide `spec.customReplicationTLSSecret` and both must contain the same `ca.crt`.
- -The custom TLS and custom replication TLS Secrets should contain the following fields (though see below for a workaround if you cannot control the field names of the Secret's `data`): - -``` -data: - ca.crt: <value> - tls.crt: <value> - tls.key: <value> -``` - -For example, if you have files named `ca.crt`, `hippo.key`, and `hippo.crt` stored on your local machine, you could run the following command to create a Secret from those files: - -``` -kubectl create secret generic -n postgres-operator hippo-cluster.tls \ - --from-file=ca.crt=ca.crt \ - --from-file=tls.key=hippo.key \ - --from-file=tls.crt=hippo.crt -``` - -After you create the Secrets, you can specify the custom TLS Secret in your `postgrescluster.postgres-operator.crunchydata.com` custom resource. For example, if you created a `hippo-cluster.tls` Secret and a `hippo-replication.tls` Secret, you would add them to your Postgres cluster: - -``` -spec: - customTLSSecret: - name: hippo-cluster.tls - customReplicationTLSSecret: - name: hippo-replication.tls -``` - -If you're unable to control the key-value pairs in the Secret, you can create a mapping to tell -the Postgres Operator what key holds the expected value. That would look similar to this: - -``` -spec: - customTLSSecret: - name: hippo.tls - items: - - key: <tls.crt key> - path: tls.crt - - key: <tls.key key> - path: tls.key - - key: <ca.crt key> - path: ca.crt -``` - -For instance, if the `hippo.tls` Secret had the `tls.crt` in a key named `hippo-tls.crt`, the -`tls.key` in a key named `hippo-tls.key`, and the `ca.crt` in a key named `hippo-ca.crt`, -then your mapping would look like: - -``` -spec: - customTLSSecret: - name: hippo.tls - items: - - key: hippo-tls.crt - path: tls.crt - - key: hippo-tls.key - path: tls.key - - key: hippo-ca.crt - path: ca.crt -``` - -Note: Although the custom TLS and custom replication TLS Secrets share the same `ca.crt`, they do not share the same `tls.crt`: - -* Your `spec.customTLSSecret` TLS certificate should have a Common Name (CN) setting that matches the primary Service name. This is the name of the cluster suffixed with `-primary`. For example, for our `hippo` cluster this would be `hippo-primary`. -* Your `spec.customReplicationTLSSecret` TLS certificate should have a Common Name (CN) setting that matches `_crunchyrepl`, which is the preset replication user. - -As with the other changes, you can roll out the TLS customizations with `kubectl apply`. - -## Labels - -There are several ways to add your own custom Kubernetes [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) to your Postgres cluster. - -- Cluster: You can apply labels to any PGO managed object in a cluster by editing the `spec.metadata.labels` section of the custom resource. -- Postgres: You can apply labels to a Postgres instance set and its objects by editing `spec.instances.metadata.labels`. -- pgBackRest: You can apply labels to pgBackRest and its objects by editing `postgresclusters.spec.backups.pgbackrest.metadata.labels`. -- PgBouncer: You can apply labels to PgBouncer connection pooling instances by editing `spec.proxy.pgBouncer.metadata.labels`. - -## Annotations - -There are several ways to add your own custom Kubernetes [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to your Postgres cluster. - -- Cluster: You can apply annotations to any PGO managed object in a cluster by editing the `spec.metadata.annotations` section of the custom resource.
-- Postgres: You can apply annotations to a Postgres instance set and its objects by editing `spec.instances.metadata.annotations`. -- pgBackRest: You can apply annotations to pgBackRest and its objects by editing `spec.backups.pgbackrest.metadata.annotations`. -- PgBouncer: You can apply annotations to PgBouncer connection pooling instances by editing `spec.proxy.pgBouncer.metadata.annotations`. - -## Pod Priority Classes - -PGO allows you to use [pod priority classes](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) to indicate the relative importance of a pod by setting a `priorityClassName` field on your Postgres cluster. This can be done as follows: - -- Instances: Priority is defined per instance set and is applied to all Pods in that instance set by editing the `spec.instances.priorityClassName` section of the custom resource. -- Dedicated Repo Host: Priority defined under the repoHost section of the spec is applied to the dedicated repo host by editing the `spec.backups.pgbackrest.repoHost.priorityClassName` section of the custom resource. -- PgBouncer: Priority is defined under the pgBouncer section of the spec and will apply to all PgBouncer Pods by editing the `spec.proxy.pgBouncer.priorityClassName` section of the custom resource. -- Backup (manual and scheduled): Priority is defined under the `spec.backups.pgbackrest.jobs.priorityClassName` section and applies that priority to all pgBackRest backup Jobs (manual and scheduled). -- Restore (data source or in-place): Priority is defined for either a "data source" restore or an in-place restore by editing the `spec.dataSource.postgresCluster.priorityClassName` section of the custom resource. -- Data Migration: The priority defined for the first instance set in the spec (array position 0) is used for the PGDATA and WAL migration Jobs. The pgBackRest repo migration Job will use the priority class applied to the repoHost. - -## Separate WAL PVCs - -PostgreSQL commits transactions by storing changes in its [Write-Ahead Log (WAL)](https://www.postgresql.org/docs/current/wal-intro.html). Because the way WAL files are accessed and -utilized often differs from that of data files, especially in high-performance situations, it can be desirable to put WAL files on a separate storage volume. With PGO, this can be done by adding -the `walVolumeClaimSpec` block to your desired instance in your PostgresCluster spec, either when your cluster is created or anytime thereafter: - -``` -spec: - instances: - - name: instance - walVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -This volume can be removed later by removing the `walVolumeClaimSpec` section from the instance. Note that when changing the WAL directory, care is taken so as not to lose any WAL files. PGO only -deletes the PVC once there are no longer any WAL files on the previously configured volume. - -## Custom Sidecar Containers - -PGO allows you to configure custom -[sidecar Containers](https://kubernetes.io/docs/concepts/workloads/pods/#how-pods-manage-multiple-containers) -for your PostgreSQL instance and pgBouncer Pods. - -To use the custom sidecar features, you will need to enable -them via the PGO -[feature gate](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/). - -PGO feature gates are enabled by setting the `PGO_FEATURE_GATES` environment -variable on the PGO Deployment.
For a feature named 'FeatureName', that would -look like - -``` -PGO_FEATURE_GATES="FeatureName=true" -``` - -Please note that it is possible to enable more than one feature at a time as -this variable accepts a comma-delimited list, for example: - -``` -PGO_FEATURE_GATES="FeatureName=true,FeatureName2=true,FeatureName3=true..." -``` - -{{% notice warning %}} -Any feature name added to `PGO_FEATURE_GATES` must be defined by PGO and must be -set to true or false. Any misconfiguration will prevent PGO from deploying. -See the [considerations](#considerations) below for additional guidance. -{{% /notice %}} - -### Custom Sidecar Containers for PostgreSQL Instance Pods - -To configure custom sidecar Containers for any of your PostgreSQL instance Pods, -you will need to enable that feature via the PGO feature gate. - -As mentioned above, PGO feature gates are enabled by setting the `PGO_FEATURE_GATES` -environment variable on the PGO Deployment. For the PostgreSQL instance sidecar -container feature, that will be - -``` -PGO_FEATURE_GATES="InstanceSidecars=true" -``` - -Once this feature is enabled, you can add your custom -[Containers](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core) -as an array to `spec.instances.containers`. See the [custom sidecar example](#custom-sidecar-example) -below for more information! - -### Custom Sidecar Containers for pgBouncer Pods - -Similar to your PostgreSQL instance Pods, to configure custom sidecar Containers -for your pgBouncer Pods, you will need to enable it via the PGO feature gate. - -As mentioned above, PGO feature gates are enabled by setting the `PGO_FEATURE_GATES` -environment variable on the PGO Deployment. For the pgBouncer custom sidecar -container feature, that will be - -``` -PGO_FEATURE_GATES="PGBouncerSidecars=true" -``` - -Once this feature is enabled, you can add your custom -[Containers](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core) -as an array to `spec.proxy.pgBouncer.containers`. See the [custom sidecar example](#custom-sidecar-example) -below for more information! - -### Custom Sidecar Example - -As a simple example, consider - -``` -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: sidecar-hippo -spec: - image: {{< param imageCrunchyPostgres >}} - postgresVersion: {{< param postgresVersion >}} - instances: - - name: instance1 - containers: - - name: testcontainer - image: mycontainer1:latest - - name: testcontainer2 - image: mycontainer1:latest - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: {{< param imageCrunchyPGBackrest >}} - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - proxy: - pgBouncer: - image: {{< param imageCrunchyPGBouncer >}} - containers: - - name: bouncertestcontainer1 - image: mycontainer1:latest -``` - -In the above example, we've added two sidecar Containers to the `instance1` Pod -and one sidecar container to the `pgBouncer` Pod. These Containers can be -defined in the manifest at any time, but the Containers will not be added to their -respective Pods until the feature gate is enabled. - -### Considerations - -- Volume mounts and other Pod details are subject to change between releases. -- The custom sidecar features are currently feature-gated.
Any sidecar Containers, - as well as any settings included in their configuration, are added and used at - your own risk. Improperly configured sidecar Containers could impact the health - and/or security of your PostgreSQL cluster! -- When adding a sidecar container, we recommend adding a unique prefix to the - container name to avoid potential naming conflicts with the official PGO - containers. - -## Database Initialization SQL - -PGO can run SQL for you as part of the cluster creation and initialization process. PGO runs the SQL using the psql client, so you can use meta-commands to connect to different databases, change error handling, or set and use variables. Its capabilities are described in the [psql documentation](https://www.postgresql.org/docs/current/app-psql.html). - -### Initialization SQL ConfigMap - -The Postgres cluster spec accepts a reference to a ConfigMap containing your init SQL file. Update your cluster spec to include the ConfigMap name, `spec.databaseInitSQL.name`, and the data key, `spec.databaseInitSQL.key`, for your SQL file. For example, if you create your ConfigMap with the following command: - -``` -kubectl -n postgres-operator create configmap hippo-init-sql --from-file=init.sql=/path/to/init.sql -``` - -You would add the following section to your PostgresCluster spec: - -``` -spec: - databaseInitSQL: - key: init.sql - name: hippo-init-sql -``` - -{{% notice note %}} -The ConfigMap must exist in the same namespace as your Postgres cluster. -{{% /notice %}} - -After you add the ConfigMap reference to your spec, apply the change with `kubectl apply -k kustomize/postgres`. PGO will create your `hippo` cluster and run your initialization SQL once the cluster has started. You can verify that your SQL has been run by checking the `databaseInitSQL` status on your Postgres cluster. While the status is set, your init SQL will not be run again. You can check cluster status with the `kubectl describe` command: - -``` -kubectl -n postgres-operator describe postgresclusters.postgres-operator.crunchydata.com hippo -``` - -{{% notice warning %}} - -In some cases, due to how Kubernetes treats PostgresCluster status, PGO may run your SQL commands more than once. Please ensure that the commands defined in your init SQL are idempotent. - -{{% /notice %}} - -Now that `databaseInitSQL` is defined in your cluster status, verify database objects have been created as expected. After verifying, we recommend removing the `spec.databaseInitSQL` field from your spec. Removing the field from the spec will also remove `databaseInitSQL` from the cluster status. - -### PSQL Usage -PGO uses the psql interactive terminal to execute SQL statements in your database. Statements are passed in using standard input and the filename flag (e.g. `psql -f -`). - -SQL statements are executed as superuser in the default maintenance database. This means you have full control to create database objects, extensions, or run any SQL statements that you might need. - -#### Integration with User and Database Management - -If you are creating users or databases, please see the [User/Database Management]({{< relref "tutorial/user-management.md" >}}) documentation. Databases created through the user management section of the spec can be referenced in your initialization SQL.
For example, if a database `zoo` is defined: - -``` -spec: - users: - - name: hippo - databases: - - "zoo" -``` - -You can connect to `zoo` by adding the following `psql` meta-command to your SQL: - -``` -\c zoo -create table t_zoo as select s, md5(random()::text) from generate_Series(1,5) s; -``` - -#### Transaction support - -By default, `psql` commits each SQL command as it completes. To combine multiple commands into a single [transaction](https://www.postgresql.org/docs/current/tutorial-transactions.html), use the [`BEGIN`](https://www.postgresql.org/docs/current/sql-begin.html) and [`COMMIT`](https://www.postgresql.org/docs/current/sql-commit.html) commands. - -``` -BEGIN; -create table t_random as select s, md5(random()::text) from generate_Series(1,5) s; -COMMIT; -``` - -#### PSQL Exit Code and Database Init SQL Status - -The exit code from `psql` will determine when the `databaseInitSQL` status is set. When `psql` returns `0`, the status will be set and SQL will not be run again. When `psql` returns with an error exit code, the status will not be set. PGO will continue attempting to execute the SQL as part of its reconcile loop until `psql` returns normally. If `psql` exits with a failure, you will need to edit the file in your ConfigMap to ensure your SQL statements will lead to a successful `psql` return. The easiest way to make live changes to your ConfigMap is to use the following `kubectl edit` command: - -``` -kubectl -n <namespace> edit configmap hippo-init-sql -``` - -Be sure to transfer any changes back over to your local file. Another option is to make changes in your local file and use `kubectl --dry-run` to create a template and pipe the output into `kubectl apply`: - -``` -kubectl create configmap hippo-init-sql --from-file=init.sql=/path/to/init.sql --dry-run=client -o yaml | kubectl apply -f - -``` - -{{% notice tip %}} -If you edit your ConfigMap and your changes aren't showing up, you may be waiting for PGO to reconcile your cluster. After some time, PGO will automatically reconcile the cluster or you can trigger reconciliation by applying any change to your cluster (e.g. with `kubectl apply -k kustomize/postgres`). -{{% /notice %}} - -To ensure that `psql` returns a failure exit code when your SQL commands fail, set the `ON_ERROR_STOP` [variable](https://www.postgresql.org/docs/current/app-psql.html#APP-PSQL-VARIABLES) as part of your SQL file: - -``` -\set ON_ERROR_STOP -\echo Any error will lead to exit code 3 -create table t_random as select s, md5(random()::text) from generate_Series(1,5) s; -``` - -## Troubleshooting - -### Changes Not Applied - -If your Postgres configuration settings are not present, ensure that you are using the syntax that Postgres expects. -You can see this in the [Postgres configuration documentation](https://www.postgresql.org/docs/current/runtime-config.html). - -## Next Steps - -You've now seen how you can further customize your Postgres cluster, but what about [managing users and databases]({{< relref "./user-management.md" >}})? That's a great question that is answered in the [next section]({{< relref "./user-management.md" >}}). diff --git a/docs/content/tutorial/delete-cluster.md b/docs/content/tutorial/delete-cluster.md deleted file mode 100644 index e83fd65a95..0000000000 --- a/docs/content/tutorial/delete-cluster.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: "Delete a Postgres Cluster" -date: -draft: false -weight: 110 ---- - -There comes a time when it is necessary to delete your cluster.
If you have been [following along with the example](https://github.com/CrunchyData/postgres-operator-examples), you can delete your Postgres cluster by simply running: - -``` -kubectl delete -k kustomize/postgres -``` - -PGO will remove all of the objects associated with your cluster. - -Regarding data retention, this is subject to the [retention policy of your PVC](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming). For more information on how Kubernetes manages data retention, please refer to the [Kubernetes docs on volume reclaiming](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming). diff --git a/docs/content/tutorial/disaster-recovery.md b/docs/content/tutorial/disaster-recovery.md deleted file mode 100644 index a63a0bc811..0000000000 --- a/docs/content/tutorial/disaster-recovery.md +++ /dev/null @@ -1,608 +0,0 @@ ---- -title: "Disaster Recovery and Cloning" -date: -draft: false -weight: 85 ---- - -Perhaps someone accidentally dropped the `users` table. Perhaps you want to clone your production database to a step-down environment. Perhaps you want to exercise your disaster recovery system (and it is important that you do!). - -Regardless of scenario, it's important to know how you can perform a "restore" operation with PGO to be able to recover your data from a particular point in time, or clone a database for other purposes. - -Let's look at how we can perform different types of restore operations. First, let's understand the core restore properties on the custom resource. - -## Restore Properties - -{{% notice info %}} - -As of v5.0.5, PGO offers the ability to restore from an existing PostgresCluster or a remote -cloud-based data source, such as S3, GCS, etc. For more on that, see the [Clone From Backups Stored in S3 / GCS / Azure Blob Storage](#cloud-based-data-source) section. - -Note that you **cannot** use both a local PostgresCluster data source and a remote cloud-based data -source at one time; if both the `dataSource.postgresCluster` and `dataSource.pgbackrest` fields -are filled in, the local PostgresCluster data source will take precedence. - -{{% /notice %}} - -There are several attributes on the custom resource that are important to understand as part of the restore process. All of these attributes are grouped together in the [`spec.dataSource.postgresCluster`]({{< relref "/references/crd#postgresclusterspecdatasourcepostgrescluster" >}}) section of the custom resource. - -Please review the list below to understand how each of these attributes works in the context of setting up a restore operation. - -- `spec.dataSource.postgresCluster.clusterName`: The name of the cluster that you are restoring from. This corresponds to the `metadata.name` attribute on a different `postgrescluster` custom resource. -- `spec.dataSource.postgresCluster.clusterNamespace`: The namespace of the cluster that you are restoring from. Used when the cluster exists in a different namespace. -- `spec.dataSource.postgresCluster.repoName`: The name of the pgBackRest repository from the `spec.dataSource.postgresCluster.clusterName` to use for the restore. Can be one of `repo1`, `repo2`, `repo3`, or `repo4`. The repository must exist in the other cluster. -- `spec.dataSource.postgresCluster.options`: Any additional [pgBackRest restore options](https://pgbackrest.org/command.html#command-restore) or general options that PGO allows.
-
-Let's walk through some examples for how we can clone and restore our databases.
-
-## Clone a Postgres Cluster
-
-Let's create a clone of our [`hippo`]({{< relref "./create-cluster.md" >}}) cluster that we created previously. We know that our cluster is named `hippo` (based on its `metadata.name`) and that we only have a single backup repository called `repo1`.
-
-Let's call our new cluster `elephant`. We can create a clone of the `hippo` cluster using a manifest like this:
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: elephant
-spec:
-  dataSource:
-    postgresCluster:
-      clusterName: hippo
-      repoName: repo1
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-```
-
-Note this section of the spec:
-
-```
-spec:
-  dataSource:
-    postgresCluster:
-      clusterName: hippo
-      repoName: repo1
-```
-
-This is the part that tells PGO to create the `elephant` cluster as an independent copy of the `hippo` cluster.
-
-The above is all you need to do to clone a Postgres cluster! PGO will work on creating a copy of your data on a new persistent volume claim (PVC) and work on initializing your cluster to spec. Easy!
-
-## Perform a Point-in-time-Recovery (PITR)
-
-Did someone drop the user table? You may want to perform a point-in-time-recovery (PITR) to revert your database back to a state before a change occurred. Fortunately, PGO can help you do that.
-
-You can set up a PITR using the [restore](https://pgbackrest.org/command.html#command-restore) command of [pgBackRest](https://www.pgbackrest.org), the backup management tool that powers the disaster recovery capabilities of PGO. You will need to set a few options on `spec.dataSource.postgresCluster.options` to perform a PITR. These options include:
-
-- `--type=time`: This tells pgBackRest to perform a PITR.
-- `--target`: The point in time to recover to. An example recovery target is `2021-06-09 14:15:11-04`. The timezone is specified here as `-04`, for EDT. Please see the [pgBackRest documentation for other timezone options](https://pgbackrest.org/user-guide.html#pitr).
-- `--set` (optional): Choose which backup to start the PITR from.
-
-A few quick notes before we begin:
-
-- To perform a PITR, you must have a backup that finished before your PITR time. In other words, you can't perform a PITR back to a time where you do not have a backup!
-- All relevant WAL files must be successfully pushed for the restore to complete correctly.
-- Be sure to select the correct repository name containing the desired backup!
-
-With that in mind, let's use the `elephant` example above. Let's say we want to perform a point-in-time-recovery (PITR) to `2021-06-09 14:15:11-04`. We can use the following manifest:
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: elephant
-spec:
-  dataSource:
-    postgresCluster:
-      clusterName: hippo
-      repoName: repo1
-      options:
-      - --type=time
-      - --target="2021-06-09 14:15:11-04"
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-```
-
-The section to pay attention to is this:
-
-```
-spec:
-  dataSource:
-    postgresCluster:
-      clusterName: hippo
-      repoName: repo1
-      options:
-      - --type=time
-      - --target="2021-06-09 14:15:11-04"
-```
-
-Notice how we put in the options to specify where to make the PITR.
-
-Using the above manifest, PGO will go ahead and create a new Postgres cluster that recovers its data up until `2021-06-09 14:15:11-04`. At that point, the cluster is promoted and you can start accessing your database from that specific point in time!
-
-## Perform an In-Place Point-in-time-Recovery (PITR)
-
-Similar to the PITR restore described above, you may want to perform a similar reversion back to a state before a change occurred, but without creating another PostgreSQL cluster. Fortunately, PGO can help you do this as well.
-
-You can set up a PITR using the [restore](https://pgbackrest.org/command.html#command-restore) command of [pgBackRest](https://www.pgbackrest.org), the backup management tool that powers the disaster recovery capabilities of PGO. You will need to set a few options on `spec.backups.pgbackrest.restore.options` to perform a PITR. These options include:
-
-- `--type=time`: This tells pgBackRest to perform a PITR.
-- `--target`: The point in time to recover to. An example recovery target is `2021-06-09 14:15:11-04`.
-- `--set` (optional): Choose which backup to start the PITR from.
-
-A few quick notes before we begin:
-
-- To perform a PITR, you must have a backup that finished before your PITR time. In other words, you can't perform a PITR back to a time where you do not have a backup (a quick way to check is sketched after this list)!
-- All relevant WAL files must be successfully pushed for the restore to complete correctly.
-- Be sure to select the correct repository name containing the desired backup!
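-
-Before triggering an in-place restore, you can ask pgBackRest which backups exist. The following is a sketch that assumes PGO's default stanza name (`db`) and the repo host Pod naming convention for the `hippo` cluster (`hippo-repo-host-0`, with a container named `pgbackrest`); adjust the names for your environment:
-
-```
-kubectl -n postgres-operator exec hippo-repo-host-0 -c pgbackrest -- \
-  pgbackrest info --stanza=db
-```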
-
-To perform an in-place restore, users will first fill out the restore section of the spec as follows:
-
-```
-spec:
-  backups:
-    pgbackrest:
-      restore:
-        enabled: true
-        repoName: repo1
-        options:
-        - --type=time
-        - --target="2021-06-09 14:15:11-04"
-```
-
-To trigger the restore, you will then annotate the PostgresCluster as follows:
-
-```
-kubectl annotate -n postgres-operator postgrescluster hippo --overwrite \
-  postgres-operator.crunchydata.com/pgbackrest-restore=id1
-```
-
-Once the restore is complete, in-place restores can be disabled:
-
-```
-spec:
-  backups:
-    pgbackrest:
-      restore:
-        enabled: false
-```
-
-Notice how we put in the options to specify where to make the PITR.
-
-Using the above manifest, PGO will go ahead and re-create your Postgres cluster to recover its data up until `2021-06-09 14:15:11-04`. At that point, the cluster is promoted and you can start accessing your database from that specific point in time!
-
-## Restore Individual Databases
-
-You might need to restore specific databases from a cluster backup, for performance reasons or to move selected databases to a machine that does not have enough space to restore the entire cluster backup.
-
-{{% notice warning %}}
-pgBackRest supports this case, but it is important to make sure this is what you want. Restoring in this manner will restore the requested database from backup and make it accessible, but all of the other databases in the backup will NOT be accessible after restore.
-
-For example, if your backup includes databases `test1`, `test2`, and `test3`, and you request that `test2` be restored, the `test1` and `test3` databases will NOT be accessible after restore is completed. Please review the pgBackRest documentation on the [limitations on restoring individual databases](https://pgbackrest.org/user-guide.html#restore/option-db-include).
-{{% /notice %}}
-
-You can restore individual databases from a backup using a spec similar to the following:
-
-```yaml
-spec:
-  backups:
-    pgbackrest:
-      restore:
-        enabled: true
-        repoName: repo1
-        options:
-        - --db-include=hippo
-```
-
-where `--db-include=hippo` would restore only the contents of the `hippo` database.
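-
-pgBackRest allows `--db-include` to be specified more than once, so a restore that keeps several databases accessible could look like the following sketch (the database names here are illustrative):
-
-```yaml
-spec:
-  backups:
-    pgbackrest:
-      restore:
-        enabled: true
-        repoName: repo1
-        options:
-        - --db-include=test1
-        - --db-include=test2
-```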
-
-## Standby Cluster
-
-Advanced high-availability and disaster recovery strategies involve spreading your database clusters across data centers to help maximize uptime. PGO provides ways to deploy postgresclusters that can span multiple Kubernetes clusters using an external storage system or PostgreSQL streaming replication. A high-level overview of standby clusters with PGO can be found in the [disaster recovery architecture]({{< relref "architecture/disaster-recovery.md" >}}) documentation.
-
-### Creating a Standby Cluster
-
-This tutorial section will describe how to create three different types of standby clusters: one using an external storage system, one that streams data directly from the primary, and one that takes advantage of both external storage and streaming. These example clusters can be created in the same Kubernetes cluster, using a single PGO instance, or spread across different Kubernetes clusters and PGO instances with the correct storage and networking configurations.
-
-#### Repo-based Standby
-
-A repo-based standby will recover from WAL files in a pgBackRest repo stored in external storage. The primary cluster should be created with a cloud-based [backup configuration]({{< relref "tutorial/backups.md" >}}). The following manifest defines a PostgresCluster with `standby.enabled` set to true and `repoName` configured to point to the `s3` repo configured in the primary:
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo-standby
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } }
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        s3:
-          bucket: "my-bucket"
-          endpoint: "s3.ca-central-1.amazonaws.com"
-          region: "ca-central-1"
-  standby:
-    enabled: true
-    repoName: repo1
-```
-
-#### Streaming Standby
-
-A streaming standby relies on an authenticated connection to the primary over the network. The primary cluster should be accessible via the network and allow TLS authentication (TLS is enabled by default). In the following manifest, we have `standby.enabled` set to `true` and have provided both the `host` and `port` that point to the primary cluster. We have also defined `customTLSSecret` and `customReplicationTLSSecret` to provide certs that allow the standby to authenticate to the primary. For this type of standby, you must use [custom TLS]({{< relref "tutorial/customize-cluster.md" >}}#customize-tls):
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo-standby
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } }
-  backups:
-    pgbackrest:
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } }
-  customTLSSecret:
-    name: cluster-cert
-  customReplicationTLSSecret:
-    name: replication-cert
-  standby:
-    enabled: true
-    host: "192.0.2.2"
-    port: 5432
-```
-
-#### Streaming Standby with an External Repo
-
-Another option is to create a standby cluster using an external pgBackRest repo that streams from the primary. With this setup, the standby cluster will continue recovering from the pgBackRest repo if streaming replication falls behind. In this manifest, we have enabled the settings from both previous examples:
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo-standby
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } }
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        s3:
-          bucket: "my-bucket"
-          endpoint: "s3.ca-central-1.amazonaws.com"
-          region: "ca-central-1"
-  customTLSSecret:
-    name: cluster-cert
-  customReplicationTLSSecret:
-    name: replication-cert
-  standby:
-    enabled: true
-    repoName: repo1
-    host: "192.0.2.2"
-    port: 5432
-```
-
-## Promoting a Standby Cluster
-
-At some point, you will want to promote the standby to start accepting both reads and writes. This has the net effect of pushing WAL (transaction archives) to the pgBackRest repository, so we need to ensure we don't accidentally create a split-brain scenario. Split-brain can happen if two primary instances attempt to write to the same repository.
-If the primary cluster is still active, make sure you [shutdown]({{< relref "tutorial/administrative-tasks.md" >}}#shutdown) the primary before trying to promote the standby cluster.
-
-Once the primary is inactive, we can promote the standby cluster by removing or disabling its `spec.standby` section:
-
-```
-spec:
-  standby:
-    enabled: false
-```
-
-This change triggers the promotion of the standby leader to a primary PostgreSQL instance and the cluster begins accepting writes.
-
-## Clone From Backups Stored in S3 / GCS / Azure Blob Storage {#cloud-based-data-source}
-
-You can clone a Postgres cluster from backups that are stored in AWS S3 (or a storage system that uses the S3 protocol), GCS, or Azure Blob Storage without needing an active Postgres cluster! The method to do so is similar to how you clone from an existing PostgresCluster. This is useful if you want to have a data set for people to use but keep it compressed on cheaper storage.
-
-For the purposes of this example, let's say that you created a Postgres cluster named `hippo` with its backups stored in S3, similar to this:
-
-```yaml
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      configuration:
-      - secret:
-          name: pgo-s3-creds
-      global:
-        repo1-path: /pgbackrest/postgres-operator/hippo/repo1
-      manual:
-        repoName: repo1
-        options:
-        - --type=full
-      repos:
-      - name: repo1
-        s3:
-          bucket: "my-bucket"
-          endpoint: "s3.ca-central-1.amazonaws.com"
-          region: "ca-central-1"
-```
-
-Ensure that the credentials in `pgo-s3-creds` match your S3 credentials. For more details on [deploying a Postgres cluster using S3 for backups]({{< relref "./backups.md" >}}#using-s3), please see the [Backups]({{< relref "./backups.md" >}}#using-s3) section of the tutorial.
-
-For optimal performance when creating a new cluster from an active cluster, ensure that you take a recent full backup of the previous cluster. The above manifest is set up to take a full backup. Assuming `hippo` is created in the `postgres-operator` namespace, you can trigger a full backup with the following command:
-
-```shell
-kubectl annotate -n postgres-operator postgrescluster hippo --overwrite \
-  postgres-operator.crunchydata.com/pgbackrest-backup="$( date '+%F_%H:%M:%S' )"
-```
-
-Wait for the backup to complete. Once this is done, you can delete the Postgres cluster.
-
-Now, let's clone the data from the `hippo` backup into a new cluster called `elephant`.
-You can use a manifest similar to this:
-
-```yaml
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: elephant
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  dataSource:
-    pgbackrest:
-      stanza: db
-      configuration:
-      - secret:
-          name: pgo-s3-creds
-      global:
-        repo1-path: /pgbackrest/postgres-operator/hippo/repo1
-      repo:
-        name: repo1
-        s3:
-          bucket: "my-bucket"
-          endpoint: "s3.ca-central-1.amazonaws.com"
-          region: "ca-central-1"
-  instances:
-    - dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      configuration:
-      - secret:
-          name: pgo-s3-creds
-      global:
-        repo1-path: /pgbackrest/postgres-operator/elephant/repo1
-      repos:
-      - name: repo1
-        s3:
-          bucket: "my-bucket"
-          endpoint: "s3.ca-central-1.amazonaws.com"
-          region: "ca-central-1"
-```
-
-There are a few things to note in this manifest. First, note that the `spec.dataSource.pgbackrest` object in our new PostgresCluster is very similar to, but slightly different from, the old PostgresCluster's `spec.backups.pgbackrest` object. The key differences are:
-
-1. No image is necessary when restoring from a cloud-based data source
-2. `stanza` is a required field when restoring from a cloud-based data source
-3. `backups.pgbackrest` has a `repos` field, which is an array
-4. `dataSource.pgbackrest` has a `repo` field, which is a single object
-
-Note also the similarities:
-
-1. We are reusing the secret for both (because the new restore pod needs to have the same credentials as the original backup pod)
-2. The `repo` object is the same
-3. The `global` object is the same
-
-This is because the new restore pod for the `elephant` PostgresCluster will need to reuse the configuration and credentials that were originally used in setting up the `hippo` PostgresCluster.
-
-In this example, we are creating a new cluster which is also backing up to the same S3 bucket; only the `spec.backups.pgbackrest.global` field has changed to point to a different path. This will ensure that the new `elephant` cluster will be pre-populated with the data from `hippo`'s backups, but will back up to its own folders, ensuring that the original backup repository is appropriately preserved.
-
-Deploy this manifest to create the `elephant` Postgres cluster. Observe that it comes up and starts running:
-
-```
-kubectl -n postgres-operator describe postgrescluster elephant
-```
-
-When it is ready, you will see that the number of expected instances matches the number of ready instances, e.g.:
-
-```
-Instances:
-  Name:             00
-  Ready Replicas:   1
-  Replicas:         1
-  Updated Replicas: 1
-```
-
-The previous example shows how to use an existing S3 repository to pre-populate a PostgresCluster while using a new S3 repository for backing up. But PostgresClusters that use cloud-based data sources can also use local repositories.
-
-For example, assuming a PostgresCluster called `rhino` that was meant to pre-populate from the original `hippo` PostgresCluster, the manifest would look like this:
-
-```yaml
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: rhino
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  dataSource:
-    pgbackrest:
-      stanza: db
-      configuration:
-      - secret:
-          name: pgo-s3-creds
-      global:
-        repo1-path: /pgbackrest/postgres-operator/hippo/repo1
-      repo:
-        name: repo1
-        s3:
-          bucket: "my-bucket"
-          endpoint: "s3.ca-central-1.amazonaws.com"
-          region: "ca-central-1"
-  instances:
-    - dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-```
-
-## Next Steps
-
-Now that we've seen how to clone a cluster and perform a point-in-time-recovery, let's see how we can [monitor]({{< relref "./monitoring.md" >}}) our Postgres cluster to detect and prevent issues from occurring.
diff --git a/docs/content/tutorial/getting-started.md b/docs/content/tutorial/getting-started.md
deleted file mode 100644
index 3ca180f110..0000000000
--- a/docs/content/tutorial/getting-started.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: "Getting Started"
-date:
-draft: false
-weight: 10
----
-
-If you have not done so, please install PGO by following the [quickstart]({{< relref "quickstart/_index.md" >}}#installation).
-
-As part of the installation, please be sure that you have done the following:
-
-1. [Forked the Postgres Operator examples repository](https://github.com/CrunchyData/postgres-operator-examples/fork) and cloned it to your host machine.
-1. Installed PGO to the `postgres-operator` namespace. If you are inside your `postgres-operator-examples` directory, you can run the `kubectl apply --server-side -k kustomize/install/default` command.
-
-Note that if you are using this guide in conjunction with images from the [Crunchy Data Customer Portal](https://access.crunchydata.com), please follow the [private registries]({{< relref "guides/private-registries.md" >}}) guide for additional setup instructions.
-
-Throughout this tutorial, we will be building on the example provided in the `kustomize/postgres` directory.
-
-When referring to a nested object within a YAML manifest, we will be using the `.` format similar to `kubectl explain`. For example, if we want to refer to the deepest element in this YAML file:
-
-```
-spec:
-  hippos:
-    appetite: huge
-```
-
-we would say `spec.hippos.appetite`.
-
-`kubectl explain` is your friend. You can use `kubectl explain postgrescluster` to introspect the `postgrescluster.postgres-operator.crunchydata.com` custom resource definition. You can also review the [CRD reference]({{< relref "references/crd.md" >}}).
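-
-For example, you can drill into nested fields of the custom resource definition with the same `.` format:
-
-```
-kubectl explain postgrescluster.spec.instances
-kubectl explain postgrescluster.spec.instances.dataVolumeClaimSpec
-```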
-
-With PGO, the Postgres Operator, installed, let's go and [create a Postgres cluster]({{< relref "./create-cluster.md" >}})!
diff --git a/docs/content/tutorial/high-availability.md b/docs/content/tutorial/high-availability.md
deleted file mode 100644
index e25467e875..0000000000
--- a/docs/content/tutorial/high-availability.md
+++ /dev/null
@@ -1,545 +0,0 @@
----
-title: "High Availability"
-date:
-draft: false
-weight: 40
----
-
-Postgres is known for its reliability: it is very stable and typically "just works." However, there are many things that can happen in a distributed environment like Kubernetes that can affect Postgres uptime, including:
-
-- The database storage disk fails or some other hardware failure occurs
-- The network on which the database resides becomes unreachable
-- The host operating system becomes unstable and crashes
-- A key database file becomes corrupted
-- A data center is lost
-- A Kubernetes component (e.g. a Service) is accidentally deleted
-
-There may also be downtime events that are due to normal operations, such as performing a minor upgrade, security patching of the operating system, hardware upgrades, or other maintenance.
-
-The good news: PGO is prepared for this, and your Postgres cluster is protected from many of these scenarios. However, to maximize your high availability (HA), let's first scale up your Postgres cluster.
-
-## HA Postgres: Adding Replicas to your Postgres Cluster
-
-PGO provides several ways to add replicas to make an HA cluster:
-
-- Increase the `spec.instances.replicas` value
-- Add an additional entry in `spec.instances`
-
-For the purposes of this tutorial, we will go with the first method and set `spec.instances.replicas` to `2`. Your manifest should look similar to:
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      replicas: 2
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-```
-
-Apply these updates to your Postgres cluster with the following command:
-
-```
-kubectl apply -k kustomize/postgres
-```
-
-Within moments, you should see a new Postgres instance initializing! You can see all of your Postgres Pods for the `hippo` cluster by running the following command:
-
-```
-kubectl -n postgres-operator get pods \
-  --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance-set
-```
-
-Let's test our high availability setup.
-
-## Testing Your HA Cluster
-
-An important part of building a resilient Postgres environment is testing its resiliency, so let's run a few tests to see how PGO performs under pressure!
-
-### Test #1: Remove a Service
-
-Let's try removing the primary Service that our application is connected to. This test does not actually require an HA Postgres cluster, but it will demonstrate PGO's ability to react to environmental changes and heal things to ensure your applications can stay up.
-
-Recall from [connecting a Postgres cluster]({{< relref "./connect-cluster.md" >}}) that we observed the Services that PGO creates, e.g.:
-
-```
-kubectl -n postgres-operator get svc \
-  --selector=postgres-operator.crunchydata.com/cluster=hippo
-```
-
-yields something similar to:
-
-```
-NAME              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
-hippo-ha          ClusterIP   10.103.73.92    <none>        5432/TCP   4h8m
-hippo-ha-config   ClusterIP   None            <none>        <none>     4h8m
-hippo-pods        ClusterIP   None            <none>        <none>     4h8m
-hippo-primary     ClusterIP   None            <none>        5432/TCP   4h8m
-hippo-replicas    ClusterIP   10.98.110.215   <none>        5432/TCP   4h8m
-```
-
-We also mentioned that the application is connected to the `hippo-primary` Service. What happens if we were to delete this Service?
-
-```
-kubectl -n postgres-operator delete svc hippo-primary
-```
-
-This would seem like it could create a downtime scenario, but run the above selector again:
-
-```
-kubectl -n postgres-operator get svc \
-  --selector=postgres-operator.crunchydata.com/cluster=hippo
-```
-
-You should see something similar to:
-
-```
-NAME              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
-hippo-ha          ClusterIP   10.103.73.92    <none>        5432/TCP   4h8m
-hippo-ha-config   ClusterIP   None            <none>        <none>     4h8m
-hippo-pods        ClusterIP   None            <none>        <none>     4h8m
-hippo-primary     ClusterIP   None            <none>        5432/TCP   3s
-hippo-replicas    ClusterIP   10.98.110.215   <none>        5432/TCP   4h8m
-```
-
-Wow -- PGO detected that the primary Service was deleted and it recreated it! Based on how your application connects to Postgres, it may not have even noticed that this event took place!
-
-Now let's try a more extreme downtime event.
-
-### Test #2: Remove the Primary StatefulSet
-
-[StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) are a Kubernetes object that provide helpful mechanisms for managing Pods that interface with stateful applications, such as databases. They provide a stable mechanism for managing Pods to help ensure data is retrievable in a predictable way.
-
-What happens if we remove the StatefulSet that points to the Pod that represents the Postgres primary? First, let's determine which Pod is the primary. We'll store it in an environment variable for convenience.
-
-```
-PRIMARY_POD=$(kubectl -n postgres-operator get pods \
-  --selector=postgres-operator.crunchydata.com/role=master \
-  -o jsonpath='{.items[*].metadata.labels.postgres-operator\.crunchydata\.com/instance}')
-```
-
-Inspect the environment variable to see which Pod is the current primary:
-
-```
-echo $PRIMARY_POD
-```
-
-should yield something similar to:
-
-```
-hippo-instance1-zj5s
-```
-
-We can use the value above to delete the StatefulSet associated with the current Postgres primary instance:
-
-```
-kubectl delete sts -n postgres-operator "${PRIMARY_POD}"
-```
-
-Let's see what happens. Try getting all of the StatefulSets for the Postgres instances in the `hippo` cluster:
-
-```
-kubectl get sts -n postgres-operator \
-  --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance
-```
-
-You should see something similar to:
-
-```
-NAME                   READY   AGE
-hippo-instance1-6kbw   1/1     15m
-hippo-instance1-zj5s   0/1     1s
-```
-
-PGO recreated the StatefulSet that was deleted! After this "catastrophic" event, PGO proceeds to heal the Postgres instance so it can rejoin the cluster. We cover the high availability process in greater depth later in the documentation.
-
-What about the other instance? We can see that it became the new primary through the following command:
-
-```
-kubectl -n postgres-operator get pods \
-  --selector=postgres-operator.crunchydata.com/role=master \
-  -o jsonpath='{.items[*].metadata.labels.postgres-operator\.crunchydata\.com/instance}'
-```
-
-which should yield something similar to:
-
-```
-hippo-instance1-6kbw
-```
-
-You can test that the failover successfully occurred in a few ways. You can connect to the example Keycloak application that we [deployed in the previous section]({{< relref "./connect-cluster.md" >}}). Based on Keycloak's connection retry logic, you may need to wait a moment for it to reconnect, but you will see it reconnect and resume being able to read and write data.
-You can also connect to the Postgres instance directly and execute the following command:
-
-```
-SELECT NOT pg_catalog.pg_is_in_recovery() is_primary;
-```
-
-If it returns `true` (or `t`), then the Postgres instance is a primary!
-
-What if PGO was down during the downtime event? Failover would still occur: the Postgres HA system works independently of PGO and can maintain its own uptime. PGO will still need to assist with some of the healing aspects, but your application will still maintain read/write connectivity to your Postgres cluster!
-
-## Synchronous Replication
-
-PostgreSQL supports synchronous replication, which is a replication mode designed to limit the risk of transaction loss. Synchronous replication waits for a transaction to be written to at least one additional server before it considers the transaction to be committed. For more information on synchronous replication, please read about PGO's [high availability architecture]({{< relref "architecture/high-availability.md" >}}#synchronous-replication-guarding-against-transactions-loss).
-
-To add synchronous replication to your Postgres cluster, you can add the following to your spec:
-
-```yaml
-spec:
-  patroni:
-    dynamicConfiguration:
-      synchronous_mode: true
-```
-
-While PostgreSQL defaults [`synchronous_commit`](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT) to `on`, you may also want to explicitly set it, in which case the above block becomes:
-
-```yaml
-spec:
-  patroni:
-    dynamicConfiguration:
-      synchronous_mode: true
-      postgresql:
-        parameters:
-          synchronous_commit: "on"
-```
-
-Note that Patroni, which manages many aspects of the cluster's availability, will favor availability over synchronicity. This means that if a synchronous replica goes down, Patroni will allow for asynchronous replication to continue as well as writes to the primary. However, if you want to disable all writing if there are no synchronous replicas available, you would have to enable `synchronous_mode_strict`, i.e.:
-
-```yaml
-spec:
-  patroni:
-    dynamicConfiguration:
-      synchronous_mode: true
-      synchronous_mode_strict: true
-```
-
-## Affinity
-
-[Kubernetes affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) rules, which include Pod anti-affinity and Node affinity, can help you to define where you want your workloads to reside. Pod anti-affinity is important for high availability: when used correctly, it ensures that your Postgres instances are distributed amongst different Nodes. Node affinity can be used to assign instances to specific Nodes, e.g. to utilize hardware that's optimized for databases.
-
-### Understanding Pod Labels
-
-PGO sets up several labels for Postgres cluster management that can be used for Pod anti-affinity or affinity rules in general. These include the following (a usage sketch follows the list):
-
-- `postgres-operator.crunchydata.com/cluster`: This is assigned to all managed Pods in a Postgres cluster. The value of this label is the name of your Postgres cluster, in this case: `hippo`.
-- `postgres-operator.crunchydata.com/instance-set`: This is assigned to all Postgres instances within a group of `spec.instances`. In the example above, the value of this label is `instance1`. If you do not name the instance set, the value is automatically set by PGO using a `NN` format, e.g. `00`.
-- `postgres-operator.crunchydata.com/instance`: This is a unique label assigned to each Postgres instance containing the name of the Postgres instance.
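-
-For example, these labels can be combined into `kubectl` selectors to inspect the Pods PGO manages. The commands below assume the `hippo` cluster running in the `postgres-operator` namespace, as elsewhere in this tutorial:
-
-```
-# List every Pod that belongs to the hippo Postgres cluster
-kubectl -n postgres-operator get pods \
-  --selector=postgres-operator.crunchydata.com/cluster=hippo
-
-# List only the Postgres instance Pods in the instance1 instance set
-kubectl -n postgres-operator get pods \
-  --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance-set=instance1
-```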
-
-Let's look at how we can set up affinity rules for our Postgres cluster to help improve high availability.
-
-### Pod Anti-affinity
-
-Kubernetes has two types of Pod anti-affinity:
-
-- Preferred: With preferred (`preferredDuringSchedulingIgnoredDuringExecution`) Pod anti-affinity, Kubernetes will make a best effort to schedule Pods matching the anti-affinity rules to different Nodes. However, if it is not possible to do so, then Kubernetes may schedule one or more Pods to the same Node.
-- Required: With required (`requiredDuringSchedulingIgnoredDuringExecution`) Pod anti-affinity, Kubernetes mandates that each Pod matching the anti-affinity rules **must** be scheduled to different Nodes. However, a Pod may not be scheduled if Kubernetes cannot find a Node that does not contain a Pod matching the rules.
-
-There is a trade-off with these two types of Pod anti-affinity: while "required" anti-affinity will ensure that all the matching Pods are scheduled on different Nodes, if Kubernetes cannot find an available Node, your Postgres instance may not be scheduled. Likewise, while "preferred" anti-affinity will make a best effort to schedule your Pods on different Nodes, Kubernetes may compromise and schedule more than one Postgres instance of the same cluster on the same Node.
-
-By understanding these trade-offs, the makeup of your Kubernetes cluster, and your requirements, you can choose the method that makes the most sense for your Postgres deployment. We'll show examples of both methods below!
-
-#### Using Preferred Pod Anti-Affinity
-
-First, let's deploy our Postgres cluster with preferred Pod anti-affinity. Note that if you have a single-node Kubernetes cluster, you will not see your Postgres instances deployed to different nodes. However, your Postgres instances _will_ be deployed.
-
-We can set up our HA Postgres cluster with preferred Pod anti-affinity like so:
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      replicas: 2
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-      affinity:
-        podAntiAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-          - weight: 1
-            podAffinityTerm:
-              topologyKey: kubernetes.io/hostname
-              labelSelector:
-                matchLabels:
-                  postgres-operator.crunchydata.com/cluster: hippo
-                  postgres-operator.crunchydata.com/instance-set: instance1
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-```
-
-Apply those changes in your Kubernetes cluster.
-
-Let's take a closer look at this section:
-
-```
-affinity:
-  podAntiAffinity:
-    preferredDuringSchedulingIgnoredDuringExecution:
-    - weight: 1
-      podAffinityTerm:
-        topologyKey: kubernetes.io/hostname
-        labelSelector:
-          matchLabels:
-            postgres-operator.crunchydata.com/cluster: hippo
-            postgres-operator.crunchydata.com/instance-set: instance1
-```
-
-`spec.instances.affinity.podAntiAffinity` follows the standard Kubernetes [Pod anti-affinity spec](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). The values for the `matchLabels` are derived from what we described in the previous section: `postgres-operator.crunchydata.com/cluster` is set to our cluster name of `hippo`, and `postgres-operator.crunchydata.com/instance-set` is set to the instance set name of `instance1`.
-We choose a `topologyKey` of `kubernetes.io/hostname`, which is standard in Kubernetes clusters.
-
-Preferred Pod anti-affinity will perform a best effort to schedule your Postgres Pods to different nodes. Let's see how you can require your Postgres Pods to be scheduled to different nodes.
-
-#### Using Required Pod Anti-Affinity
-
-Required Pod anti-affinity forces Kubernetes to schedule your Postgres Pods to different Nodes. Note that if Kubernetes is unable to schedule all Pods to different Nodes, some of your Postgres instances may become unavailable.
-
-Using the previous example, let's indicate to Kubernetes that we want to use required Pod anti-affinity for our Postgres clusters:
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      replicas: 2
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-      affinity:
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - topologyKey: kubernetes.io/hostname
-            labelSelector:
-              matchLabels:
-                postgres-operator.crunchydata.com/cluster: hippo
-                postgres-operator.crunchydata.com/instance-set: instance1
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-```
-
-Apply those changes in your Kubernetes cluster.
-
-If you are in a single-Node Kubernetes cluster, you will notice that not all of your Postgres instance Pods will be scheduled. This is due to the `requiredDuringSchedulingIgnoredDuringExecution` requirement. However, if you have enough Nodes available, you will see the Postgres instance Pods scheduled to different Nodes:
-
-```
-kubectl get pods -n postgres-operator -o wide \
-  --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance
-```
-
-### Node Affinity
-
-Node affinity can be used to assign your Postgres instances to Nodes with specific hardware or to guarantee a Postgres instance resides in a specific zone. Node affinity can be set within the `spec.instances.affinity.nodeAffinity` attribute, following the standard Kubernetes [node affinity spec](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/).
-
-Let's see an example with required Node affinity. Let's say we have a set of Nodes that are reserved for database usage and that have the label `workload-role=db`.
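-
-If your Nodes do not carry such a label yet, you can add one with `kubectl` (the Node names below are hypothetical):
-
-```
-kubectl label nodes node-1 node-2 workload-role=db
-```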
-We can create a Postgres cluster with a required Node affinity rule to schedule all of the databases to those Nodes using the following configuration:
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      replicas: 2
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: workload-role
-                operator: In
-                values:
-                - db
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-```
-
-## Pod Topology Spread Constraints
-
-In addition to affinity and anti-affinity settings, [Kubernetes Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) can also help you to define where you want your workloads to reside. However, while PodAffinity allows any number of Pods to be added to a qualifying topology domain, and PodAntiAffinity allows only one Pod to be scheduled into a single topology domain, topology spread constraints allow you to distribute Pods across different topology domains with a finer level of control.
-
-### API Field Configuration
-
-The spread constraint [API fields](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods) can be configured for instance Pods as well as for PgBouncer and pgBackRest repo host Pods. The basic configuration is as follows:
-
-```
-  topologySpreadConstraints:
-    - maxSkew: <integer>
-      topologyKey: <string>
-      whenUnsatisfiable: <DoNotSchedule|ScheduleAnyway>
-      labelSelector: <object>
-```
-
-where "maxSkew" describes the maximum degree to which Pods can be unevenly distributed, "topologyKey" is the key that defines a topology in the Nodes' Labels, "whenUnsatisfiable" specifies what action should be taken when "maxSkew" can't be satisfied, and "labelSelector" is used to find matching Pods.
-
-### Example Spread Constraints
-
-To help illustrate how you might use this with your cluster, we can review examples for configuring spread constraints on our Instance and pgBackRest repo host Pods. For this example, assume we have a three-node Kubernetes cluster where the first node is labeled with `my-node-label=one`, the second node is labeled with `my-node-label=two` and the final node is labeled `my-node-label=three`. The label key `my-node-label` will function as our `topologyKey`. Note all three nodes in our examples will be schedulable, so a Pod could live on any of the three Nodes.
-
-#### Instance Pod Spread Constraints
-
-To begin, we can set our topology spread constraints on our cluster Instance Pods. Given this configuration:
-
-```
-  instances:
-    - name: instance1
-      replicas: 5
-      topologySpreadConstraints:
-        - maxSkew: 1
-          topologyKey: my-node-label
-          whenUnsatisfiable: DoNotSchedule
-          labelSelector:
-            matchLabels:
-              postgres-operator.crunchydata.com/instance-set: instance1
-```
-
-we will expect 5 Instance pods to be created. Each of these Pods will have the standard `postgres-operator.crunchydata.com/instance-set: instance1` Label set, so each Pod will be properly counted when determining the `maxSkew`.
-Since we have 3 nodes with a `maxSkew` of 1 and we've set `whenUnsatisfiable` to `DoNotSchedule`, we should see 2 Pods on 2 of the nodes and 1 Pod on the remaining Node, thus ensuring our Pods are distributed as evenly as possible.
-
-#### pgBackRest Repo Pod Spread Constraints
-
-We can also set topology spread constraints on our cluster's pgBackRest repo host pod. While we normally will only have a single pod per cluster, we could use a more generic label to add a preference that repo host Pods from different clusters are distributed among our Nodes. For example, by setting our `matchLabel` value to `postgres-operator.crunchydata.com/pgbackrest: ""` and our `whenUnsatisfiable` value to `ScheduleAnyway`, we will allow our repo host Pods to be scheduled no matter what Nodes may be available, but attempt to minimize skew as much as possible.
-
-```
-      repoHost:
-        topologySpreadConstraints:
-        - maxSkew: 1
-          topologyKey: my-node-label
-          whenUnsatisfiable: ScheduleAnyway
-          labelSelector:
-            matchLabels:
-              postgres-operator.crunchydata.com/pgbackrest: ""
-```
-
-#### Putting it All Together
-
-Now that each of our Pods has our desired Topology Spread Constraints defined, let's put together a complete cluster definition:
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      replicas: 5
-      topologySpreadConstraints:
-      - maxSkew: 1
-        topologyKey: my-node-label
-        whenUnsatisfiable: DoNotSchedule
-        labelSelector:
-          matchLabels:
-            postgres-operator.crunchydata.com/instance-set: instance1
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1G
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repoHost:
-        topologySpreadConstraints:
-        - maxSkew: 1
-          topologyKey: my-node-label
-          whenUnsatisfiable: ScheduleAnyway
-          labelSelector:
-            matchLabels:
-              postgres-operator.crunchydata.com/pgbackrest: ""
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1G
-```
-
-You can then apply those changes in your Kubernetes cluster.
-
-Once your cluster finishes deploying, you can check that your Pods are assigned to the correct Nodes:
-
-```
-kubectl get pods -n postgres-operator -o wide --selector=postgres-operator.crunchydata.com/cluster=hippo
-```
-
-## Next Steps
-
-We've now seen how PGO helps your application stay "always on" with your Postgres database. Now let's explore how PGO can minimize or eliminate downtime for operations that would normally cause that, such as [resizing your Postgres cluster]({{< relref "./resize-cluster.md" >}}).
diff --git a/docs/content/tutorial/monitoring.md b/docs/content/tutorial/monitoring.md
deleted file mode 100644
index fa4ce3185c..0000000000
--- a/docs/content/tutorial/monitoring.md
+++ /dev/null
@@ -1,86 +0,0 @@
----
-title: "Monitoring"
-date:
-draft: false
-weight: 90
----
-
-While having [high availability]({{< relref "tutorial/high-availability.md" >}}) and [disaster recovery]({{< relref "tutorial/disaster-recovery.md" >}}) systems in place helps in the event of something going wrong with your PostgreSQL cluster, monitoring helps you anticipate problems before they happen. Additionally, monitoring can help you diagnose and resolve issues that may cause degraded performance rather than downtime.
-
-Let's look at how PGO allows you to enable monitoring in your cluster.
-
-## Adding the Exporter Sidecar
-
-We can add the Crunchy PostgreSQL Exporter sidecar to your cluster using the `kustomize/postgres` example in the [Postgres Operator examples] repository.
-
-Monitoring tools are added using the `spec.monitoring` section of the custom resource. Currently, the only monitoring tool supported is the Crunchy PostgreSQL Exporter configured with [pgMonitor].
-
-In the `kustomize/postgres/postgres.yaml` file, add the following YAML to the spec:
-
-```
-monitoring:
-  pgmonitor:
-    exporter:
-      image: {{< param imageCrunchyExporter >}}
-```
-
-Save your changes and run:
-
-```
-kubectl apply -k kustomize/postgres
-```
-
-PGO will detect the change and add the Exporter sidecar to all Postgres Pods that exist in your cluster. PGO will also do the work to allow the Exporter to connect to the database and gather metrics that can be accessed using the [PGO Monitoring] stack.
-
-### Configuring TLS Encryption for the Exporter
-
-PGO allows you to configure the exporter sidecar to use TLS encryption. If you provide a custom TLS Secret via the exporter spec:
-
-```
-  monitoring:
-    pgmonitor:
-      exporter:
-        customTLSSecret:
-          name: hippo.tls
-```
-
-Like other custom TLS Secrets that can be configured with PGO, the Secret will need to be created in the same Namespace as your PostgresCluster. It should also contain the TLS key (`tls.key`) and TLS certificate (`tls.crt`) needed to enable encryption.
-
-```
-data:
-  tls.crt: <value>
-  tls.key: <value>
-```
-
-After you configure TLS for the exporter, you will need to update your Prometheus deployment to use TLS, and your connection to the exporter will be encrypted. Check out the [Prometheus] documentation for more information on configuring TLS.
-
-## Accessing the Metrics
-
-Once the Crunchy PostgreSQL Exporter has been enabled in your cluster, follow the steps outlined in [PGO Monitoring] to install the monitoring stack. This will allow you to deploy a [pgMonitor] configuration of [Prometheus], [Grafana], and [Alertmanager] monitoring tools in Kubernetes. These tools will be set up by default to connect to the Exporter containers on your Postgres Pods.
-
-## Next Steps
-
-Now that we can monitor our cluster, let's explore how [connection pooling]({{< relref "connection-pooling.md" >}}) can be enabled using PGO and how it is helpful.
-
-[pgMonitor]: https://github.com/CrunchyData/pgmonitor
-[Grafana]: https://grafana.com/
-[Prometheus]: https://prometheus.io/
-[Alertmanager]: https://prometheus.io/docs/alerting/latest/alertmanager/
-[PGO Monitoring]: {{< relref "installation/monitoring/_index.md" >}}
-[Postgres Operator examples]: https://github.com/CrunchyData/postgres-operator-examples/fork
diff --git a/docs/content/tutorial/resize-cluster.md b/docs/content/tutorial/resize-cluster.md
deleted file mode 100644
index 3aaa8c46d3..0000000000
--- a/docs/content/tutorial/resize-cluster.md
+++ /dev/null
@@ -1,352 +0,0 @@
----
-title: "Resize a Postgres Cluster"
-date:
-draft: false
-weight: 50
----
-
-You did it -- the application is a success! Traffic is booming, so much so that you need to add more resources to your Postgres cluster. However, you're worried that any resize operation may cause downtime and create a poor experience for your end users.
-
-This is where PGO comes in: PGO will help orchestrate rolling out any potentially disruptive changes to your cluster to minimize or eliminate any downtime for your application.
-To do so, we will assume that you have [deployed a high availability Postgres cluster]({{< relref "./high-availability.md" >}}) as described in the [previous section]({{< relref "./high-availability.md" >}}).
-
-Let's dive in.
-
-## Resize Memory and CPU
-
-Memory and CPU resources are an important component for vertically scaling your Postgres cluster. Coupled with [tweaks to your Postgres configuration file]({{< relref "./customize-cluster.md" >}}), allocating more memory and CPU to your cluster can help it to perform better under load.
-
-It's important for instances in the same high availability set to have the same resources. PGO lets you adjust CPU and memory within the `resources` sections of the `postgresclusters.postgres-operator.crunchydata.com` custom resource. These include:
-
-- `spec.instances.resources` section, which sets the resource values for the PostgreSQL container, as well as any init containers in the associated pod and containers created by the `pgDataVolume` and `pgWALVolume` [data migration jobs]({{< relref "guides/data-migration.md" >}}).
-- `spec.instances.sidecars.replicaCertCopy.resources` section, which sets the resources for the `replica-cert-copy` sidecar container.
-- `spec.monitoring.pgmonitor.exporter.resources` section, which sets the resources for the `exporter` sidecar container.
-- `spec.backups.pgbackrest.repoHost.resources` section, which sets the resources for the pgBackRest repo host container, as well as any init containers in the associated pod and containers created by the `pgBackRestVolume` [data migration job]({{< relref "guides/data-migration.md" >}}).
-- `spec.backups.pgbackrest.sidecars.pgbackrest.resources` section, which sets the resources for the `pgbackrest` sidecar container.
-- `spec.backups.pgbackrest.sidecars.pgbackrestConfig.resources` section, which sets the resources for the `pgbackrest-config` sidecar container.
-- `spec.backups.pgbackrest.jobs.resources` section, which sets the resources for any pgBackRest backup job.
-- `spec.backups.pgbackrest.restore.resources` section, which sets the resources for manual pgBackRest restore jobs.
-- `spec.dataSource.postgresCluster.resources` section, which sets the resources for pgBackRest restore jobs created during the [cloning]({{< relref "./disaster-recovery.md" >}}) process.
-- `spec.proxy.pgBouncer.resources` section, which sets the resources for the `pgbouncer` container.
-- `spec.proxy.pgBouncer.sidecars.pgbouncerConfig.resources` section, which sets the resources for the `pgbouncer-config` sidecar container.
-
-The layout of these `resources` sections should be familiar: they follow the same pattern as the standard Kubernetes structure for setting [container resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). Note that these settings also allow for the configuration of [QoS classes](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/).
-
-For example, using the `spec.instances.resources` section, let's say we want to update our `hippo` Postgres cluster so that each instance has a limit of `2.0` CPUs and `4Gi` of memory.
-We can make the following changes to the manifest:
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      replicas: 2
-      resources:
-        limits:
-          cpu: 2.0
-          memory: 4Gi
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 1Gi
-```
-
-In particular, we added the following to `spec.instances`:
-
-```
-resources:
-  limits:
-    cpu: 2.0
-    memory: 4Gi
-```
-
-Apply these updates to your Postgres cluster with the following command:
-
-```
-kubectl apply -k kustomize/postgres
-```
-
-Now, let's watch how the rollout happens:
-
-```
-watch "kubectl -n postgres-operator get pods \
-  --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance \
-  -o=jsonpath='{range .items[*]}{.metadata.name}{\"\t\"}{.metadata.labels.postgres-operator\.crunchydata\.com/role}{\"\t\"}{.status.phase}{\"\t\"}{.spec.containers[].resources.limits}{\"\n\"}{end}'"
-```
-
-Observe how each Pod is terminated one-at-a-time. This is part of a "rolling update". Because updating the resources of a Pod is a destructive action, PGO first applies the CPU and memory changes to the replicas. PGO ensures that the changes are successfully applied to a replica instance before moving on to the next replica.
-
-Once all of the changes are applied, PGO will perform a "controlled switchover": it will promote a replica to become a primary, and apply the changes to the final Postgres instance.
-
-By rolling out the changes in this way, PGO ensures there is minimal to zero disruption to your application: you are able to successfully roll out updates and your users may not even notice!
-
-## Resize PVC
-
-Your application is a success! Your data continues to grow, and it's becoming apparent that you need more disk. That's great: you can resize your PVC directly on your `postgresclusters.postgres-operator.crunchydata.com` custom resource with minimal to zero downtime.
-
-PVC resizing, also known as [volume expansion](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims), is a function of your storage class: it must support volume resizing. Additionally, PVCs can only be **sized up**: you cannot shrink the size of a PVC.
-
-You can adjust the PVC sizes of all the managed storage in a Postgres cluster that uses Kubernetes storage. These include:
-
-- `spec.instances.dataVolumeClaimSpec.resources.requests.storage`: The Postgres data directory (aka your database).
-- `spec.backups.pgbackrest.repos.volume.volumeClaimSpec.resources.requests.storage`: The pgBackRest repository when using "volume" storage
-
-The above should be familiar: it follows the same pattern as the standard [Kubernetes PVC](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) structure.
-
-For example, let's say we want to update our `hippo` Postgres cluster so that each instance now uses a `10Gi` PVC and our backup repository uses a `20Gi` PVC.
-We can do so with the following markup:
-
-```
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      replicas: 2
-      resources:
-        limits:
-          cpu: 2.0
-          memory: 4Gi
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 10Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 20Gi
-```
-
-In particular, we added the following to `spec.instances`:
-
-```
-dataVolumeClaimSpec:
-  resources:
-    requests:
-      storage: 10Gi
-```
-
-and added the following to `spec.backups.pgbackrest.repos.volume`:
-
-```
-volumeClaimSpec:
-  accessModes:
-  - "ReadWriteOnce"
-  resources:
-    requests:
-      storage: 20Gi
-```
-
-Apply these updates to your Postgres cluster with the following command:
-
-```
-kubectl apply -k kustomize/postgres
-```
-
-### Resize PVCs With StorageClass That Does Not Allow Expansion
-
-Not all Kubernetes Storage Classes allow for [volume expansion](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims). However, with PGO, you can still resize your Postgres cluster data volumes even if your storage class does not allow it!
-
-Let's go back to the previous example:
-
-```yaml
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      replicas: 2
-      resources:
-        limits:
-          cpu: 2.0
-          memory: 4Gi
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 20Gi
-```
-
-First, create a new instance set that has the larger volume size. Call this instance set `instance2`. The manifest would look like this:
-
-```yaml
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PostgresCluster
-metadata:
-  name: hippo
-spec:
-  image: {{< param imageCrunchyPostgres >}}
-  postgresVersion: {{< param postgresVersion >}}
-  instances:
-    - name: instance1
-      replicas: 2
-      resources:
-        limits:
-          cpu: 2.0
-          memory: 4Gi
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 1Gi
-    - name: instance2
-      replicas: 2
-      resources:
-        limits:
-          cpu: 2.0
-          memory: 4Gi
-      dataVolumeClaimSpec:
-        accessModes:
-        - "ReadWriteOnce"
-        resources:
-          requests:
-            storage: 10Gi
-  backups:
-    pgbackrest:
-      image: {{< param imageCrunchyPGBackrest >}}
-      repos:
-      - name: repo1
-        volume:
-          volumeClaimSpec:
-            accessModes:
-            - "ReadWriteOnce"
-            resources:
-              requests:
-                storage: 20Gi
-```
-
-Take note of the block that contains `instance2`:
-
-```yaml
-- name: instance2
-  replicas: 2
-  resources:
-    limits:
-      cpu: 2.0
-      memory: 4Gi
-  dataVolumeClaimSpec:
-    accessModes:
-    - "ReadWriteOnce"
-    resources:
-      requests:
-        storage: 10Gi
-```
-
-This creates a second set of two Postgres instances, both of which come up as replicas, that have a larger PVC.
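-
-One way to gauge whether the new replicas have caught up is to check replication from the current primary. This sketch assumes the Postgres container in the instance Pod is named `database` and reuses the `role=master` label selector shown in the high availability tutorial:
-
-```
-PRIMARY=$(kubectl -n postgres-operator get pods -o name \
-  --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/role=master)
-kubectl -n postgres-operator exec "${PRIMARY}" -c database -- \
-  psql -c 'SELECT application_name, state, replay_lag FROM pg_stat_replication;'
-```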
- -Once this new instance set is available and the new instances have caught up to the primary, you can then apply the following manifest: - -```yaml -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: {{< param imageCrunchyPostgres >}} - postgresVersion: {{< param postgresVersion >}} - instances: - - name: instance2 - replicas: 2 - resources: - limits: - cpu: 2.0 - memory: 4Gi - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 10Gi - backups: - pgbackrest: - image: {{< param imageCrunchyPGBackrest >}} - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 20Gi -``` - -This will promote one of the instances with the larger PVC to be the new primary and remove the instances with the smaller PVCs! - -This method can also be used to shrink PVCs to a smaller size. - -## Troubleshooting - -### Postgres Pod Can't Be Scheduled - -There are many reasons why a PostgreSQL Pod may not be scheduled: - -- **Resources are unavailable**. Ensure that you have a Kubernetes [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) with enough resources to satisfy your memory or CPU Request. -- **PVC cannot be provisioned**. Ensure that you request a PVC size that is available, or that your PVC storage class is set up correctly. - -### PVCs Do Not Resize - -Ensure that your storage class supports PVC resizing. You can check that by inspecting the `allowVolumeExpansion` attribute: - -``` -kubectl get sc -``` - -If the storage class does not support PVC resizing, you can use the technique described above to resize PVCs using a second instance set. - -## Next Steps - -You've now resized your Postgres cluster, but how can you configure Postgres to take advantage of the new resources? Let's look at how we can [customize the Postgres cluster configuration]({{< relref "./customize-cluster.md" >}}). diff --git a/docs/content/tutorial/update-cluster.md b/docs/content/tutorial/update-cluster.md deleted file mode 100644 index 0bd0cd047f..0000000000 --- a/docs/content/tutorial/update-cluster.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "Apply Software Updates" -date: -draft: false -weight: 70 ---- - -Did you know that Postgres releases bug fixes [once every three months](https://www.postgresql.org/developer/roadmap/)? Additionally, we periodically refresh the container images to ensure the base images have the latest software that may fix some CVEs. - -It's generally good practice to keep your software up-to-date for stability and security purposes, so let's learn how PGO helps you to accept low risk, "patch" type updates. - -The good news: you do not need to update PGO itself to apply component updates: you can update each Postgres cluster whenever you want to apply the update! This lets you choose when you want to apply updates to each of your Postgres clusters, so you can update each on your own schedule. If you have a [high availability Postgres]({{< relref "./high-availability.md" >}}) cluster, PGO uses a rolling update to minimize or eliminate any downtime for your application. - -## Applying Minor Postgres Updates - -The Postgres image is referenced using the `spec.image` field and looks similar to the below: - -``` -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.2-0 -``` - -Diving into the tag a bit further, you will notice the `14.2-0` portion.
This represents the Postgres minor version (`14.2`) and the patch number of the release (`0`). If the patch number is incremented (e.g. `14.2-1`), this means that the container is rebuilt, but there are no changes to the Postgres version. If the minor version is incremented (e.g. `14.3-0`), this means that there is a newer bug fix release of Postgres within the container. - -To update the image, you just need to modify the `spec.image` field with the new image reference, e.g. - -``` -spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.2-1 -``` - -You can apply the changes using `kubectl apply`. Similar to the rolling update example when we [resized the cluster]({{< relref "./resize-cluster.md" >}}), the update is first applied to the Postgres replicas, then a controlled switchover occurs, and the final instance is updated. - -For the `hippo` cluster, you can see the status of the rollout by running the command below: - -``` -kubectl -n postgres-operator get pods \ - --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance \ - -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.postgres-operator\.crunchydata\.com/role}{"\t"}{.status.phase}{"\t"}{.spec.containers[].image}{"\n"}{end}' -``` - -or by running a watch: - -``` -watch "kubectl -n postgres-operator get pods \ - --selector=postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance \ - -o=jsonpath='{range .items[*]}{.metadata.name}{\"\t\"}{.metadata.labels.postgres-operator\.crunchydata\.com/role}{\"\t\"}{.status.phase}{\"\t\"}{.spec.containers[].image}{\"\n\"}{end}'" -``` - -## Rolling Back Minor Postgres Updates - -This methodology also allows you to roll back changes from minor Postgres updates. You can change the `spec.image` field to your desired container image. PGO will then ensure each Postgres instance in the cluster rolls back to the desired image. - -## Applying Other Component Updates - -There are other components that go into a PGO Postgres cluster. These include pgBackRest, PgBouncer and others. Each one of these components has its own image: for example, you can find a reference to the pgBackRest image in the `spec.backups.pgbackrest.image` attribute. - -Applying software updates for the other components in a Postgres cluster works similarly to the above. As pgBackRest and PgBouncer are Kubernetes [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), Kubernetes will help manage the rolling update to minimize disruption. - -## Next Steps - -Now that we know how to update our software components, let's look at how PGO handles [disaster recovery]({{< relref "./backups.md" >}})! diff --git a/docs/content/tutorial/user-management.md b/docs/content/tutorial/user-management.md deleted file mode 100644 index 33954e7a73..0000000000 --- a/docs/content/tutorial/user-management.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: "User / Database Management" -date: -draft: false -weight: 65 ---- - -PGO comes with some out-of-the-box conveniences for managing users and databases in your Postgres cluster. However, you may have requirements where you need to create additional users, adjust user privileges or add additional databases to your cluster. - -For detailed information about how user and database management works in PGO, please see the [User Management]({{< relref "architecture/user-management.md" >}}) section of the architecture guide.
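- -As a point of reference before creating new users: PGO already manages one Kubernetes Secret per user, following the `<clusterName>-pguser-<userName>` naming pattern shown throughout this page. A minimal sketch for reading the generated password, assuming the default user shares the `hippo` cluster's name and the tutorial's `postgres-operator` namespace: - -``` -# Sketch: decode the default user's password (assumes Secret hippo-pguser-hippo exists) -kubectl -n postgres-operator get secret hippo-pguser-hippo \ - -o jsonpath='{.data.password}' | base64 -d -```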
- -## Creating a New User - -You can create a new user with the following snippet in the `postgrescluster` custom resource. Let's add this to our `hippo` cluster: - -``` -spec: - users: - - name: rhino -``` - -You can now apply the changes and see that the new user is created. Note the following: - -- The user would only be able to connect to the default `postgres` database. -- The user will not have any connection credentials populated into the `hippo-pguser-rhino` Secret. -- The user is unprivileged. - -Let's create a new database named `zoo` that we will let the `rhino` user access: - -``` -spec: - users: - - name: rhino - databases: - - zoo -``` - -Inspect the `hippo-pguser-rhino` Secret. You should now see that the `dbname` and `uri` fields are now populated! - -We can set role privileges by using the standard [role attributes](https://www.postgresql.org/docs/current/role-attributes.html) that Postgres provides and adding them to `spec.users.options`. Let's say we want `rhino` to become a superuser (be careful about doling out Postgres superuser privileges!). You can add the following to the spec: - -``` -spec: - users: - - name: rhino - databases: - - zoo - options: "SUPERUSER" -``` - -There you have it: we have created a Postgres user named `rhino` with superuser privileges that has access to the `zoo` database (though a superuser has access to all databases!). - -## Adjusting Privileges - -Let's say you want to revoke the superuser privilege from `rhino`. You can do so with the following: - -``` -spec: - users: - - name: rhino - databases: - - zoo - options: "NOSUPERUSER" -``` - -If you want to add multiple privileges, you can add each privilege with a space between them in `options`, e.g.: - -``` -spec: - users: - - name: rhino - databases: - - zoo - options: "CREATEDB CREATEROLE" -``` - -## Managing the `postgres` User - -By default, PGO does not give you access to the `postgres` user. However, you can get access to this account by doing the following: - -``` -spec: - users: - - name: postgres -``` - -This will create a Secret of the pattern `<clusterName>-pguser-postgres` that contains the credentials of the `postgres` account. For our `hippo` cluster, this would be `hippo-pguser-postgres`. - -## Deleting a User - -PGO does not delete users automatically: after you remove the user from the spec, it will still exist in your cluster. To remove a user and all of its objects, as a superuser you will need to run [`DROP OWNED`](https://www.postgresql.org/docs/current/sql-drop-owned.html) in each database the user has objects in, and [`DROP ROLE`](https://www.postgresql.org/docs/current/sql-droprole.html) -in your Postgres cluster. - -For example, with the above `rhino` user, you would run the following: - -``` -DROP OWNED BY rhino; -DROP ROLE rhino; -``` - -Note that you may need to run `DROP OWNED BY rhino CASCADE;` based upon your object ownership structure -- be very careful with this command! - -## Deleting a Database - -PGO does not delete databases automatically: after you remove all instances of the database from the spec, it will still exist in your cluster. To completely remove the database, you must run the [`DROP DATABASE`](https://www.postgresql.org/docs/current/sql-dropdatabase.html) -command as a Postgres superuser.
- -For example, to remove the `zoo` database, you would execute the following: - -``` -DROP DATABASE zoo; -``` - -## Next Steps - -You now know how to manage users and databases in your cluster and now have a well-rounded set of tools to support your "Day 1" operations. Let's start looking at some of the "Day 2" work you can do with PGO, such as [updating to the next Postgres version]({{< relref "./update-cluster.md" >}}), in the next section. diff --git a/docs/content/upgrade/_index.md b/docs/content/upgrade/_index.md deleted file mode 100644 index ffcfcb2f65..0000000000 --- a/docs/content/upgrade/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "Upgrade" -date: -draft: false -weight: 33 ---- - -# Overview - -Upgrading to a new version of PGO is typically as simple as following the various installation -guides defined within the PGO documentation: - -- [PGO Kustomize Install]({{< relref "./kustomize.md" >}}) -- [PGO Helm Install]({{< relref "./helm.md" >}}) - -However, when upgrading to or from certain versions of PGO, extra steps may be required in order -to ensure a clean and successful upgrade. - -This section provides detailed instructions for upgrading PGO 5.x using Kustomize or Helm, along with information for upgrading from PGO v4 to PGO v5. - -{{% notice info %}} -Depending on version updates, upgrading PGO may automatically roll out changes to managed Postgres clusters. This could result in downtime--we cannot guarantee no interruption of service, though PGO attempts graceful incremental rollouts of affected pods, with the goal of zero downtime. -{{% /notice %}} - -## Upgrading PGO 5.x - -- [PGO Kustomize Upgrade]({{< relref "./kustomize.md" >}}) -- [PGO Helm Upgrade]({{< relref "./helm.md" >}}) - -## Upgrading from PGO v4 to PGO v5 - -- [V4 to V5 Upgrade Methods]({{< relref "./v4tov5" >}}) diff --git a/docs/content/upgrade/helm.md b/docs/content/upgrade/helm.md deleted file mode 100644 index b04a514287..0000000000 --- a/docs/content/upgrade/helm.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "Upgrading PGO v5 Using Helm" -date: -draft: false -weight: 70 ---- - -Once PGO v5 has been installed with Helm, it can then be upgraded using the `helm upgrade` command. -However, before running the `upgrade` command, any CustomResourceDefinitions (CRDs) must first be -manually updated (this is specifically due to a [design decision in Helm v3][helm-crd-limits], -in which any CRDs in the Helm chart are only applied when using the `helm install` command). - -[helm-crd-limits]: https://helm.sh/docs/topics/charts/#limitations-on-crds - -If you would like, before upgrading the CRDs, you can review the changes with -`kubectl diff`. They can be verbose, so a pager like `less` may be useful: - -```shell -kubectl diff -f helm/install/crds | less -``` - -Use the following command to update the CRDs using -[server-side apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/) -_before_ running `helm upgrade`. The `--force-conflicts` flag tells Kubernetes that you recognize -Helm created the CRDs during `helm install`. - -```shell -kubectl apply --server-side --force-conflicts -f helm/install/crds -``` - -Then, perform the upgrade using Helm: - -```shell -helm upgrade <name> -n <namespace> helm/install -``` - -PGO versions earlier than v5.4.0 include a pgo-upgrade deployment. When upgrading to v5.4.x, users -should expect the pgo-upgrade deployment to be deleted automatically.
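- -After the upgrade completes, you can verify which chart release is now deployed. A minimal check, assuming the same `<name>` and `<namespace>` that were used during `helm install`: - -```shell -# Sketch: confirm the release and chart version after upgrading -helm list -n <namespace> -```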
diff --git a/docs/content/upgrade/kustomize.md b/docs/content/upgrade/kustomize.md deleted file mode 100644 index 2f9327d228..0000000000 --- a/docs/content/upgrade/kustomize.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: "Upgrading PGO v5 Using Kustomize" -date: -draft: false -weight: 50 ---- - -## Upgrading to v5.4.0 from v5.3.x - -Apply the new version of the Kubernetes installer: - -```bash -kubectl apply --server-side -k kustomize/install/default -``` - -PGO versions from 5.1.x through 5.3.x include a pgo-upgrade deployment, which -is no longer needed after upgrading to v5.4.x. Delete the deployment: - -```bash -kubectl delete deployment pgo-upgrade -``` - -## Upgrading from PGO v5.0.0 Using Kustomize - -Starting with PGO v5.0.1, both the Deployment and ServiceAccount created when installing PGO via -the installers in the -[Postgres Operator examples repository](https://github.com/CrunchyData/postgres-operator-examples) -have been renamed from `postgres-operator` to `pgo`. As a result of this change, if using -Kustomize to install PGO and upgrading from PGO v5.0.0, the following step must be completed prior -to upgrading. This will ensure multiple versions of PGO are not installed and running concurrently -within your Kubernetes environment. - -Prior to upgrading PGO, first manually delete the PGO v5.0.0 `postgres-operator` Deployment and -ServiceAccount: - -```bash -kubectl -n postgres-operator delete deployment,serviceaccount postgres-operator -``` - -Then, once both the Deployment and ServiceAccount have been deleted, proceed with upgrading PGO -by applying the new version of the Kustomize installer: - -```bash -kubectl apply --server-side -k kustomize/install/default -``` - -## Upgrading from PGO v5.0.2 and Below - -As a result of changes to pgBackRest dedicated repository host deployments in PGO v5.0.3 -(please see the [PGO v5.0.3 release notes]({{< relref "../releases/5.0.3.md" >}}) for more details), -reconciliation of a pgBackRest dedicated repository host might become stuck with the following -error (as shown in the PGO logs) following an upgrade from PGO versions v5.0.0 through v5.0.2: - -```bash -StatefulSet.apps \"hippo-repo-host\" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', 'updateStrategy' and 'minReadySeconds' are forbidden -``` - -If this is the case, proceed with deleting the pgBackRest dedicated repository host StatefulSet, -and PGO will then proceed with recreating and reconciling the dedicated repository host normally: - -```bash -kubectl delete sts hippo-repo-host -``` - -Additionally, please be sure to update and apply all PostgresCluster custom resources in accordance -with any applicable spec changes described in the -[PGO v5.0.3 release notes]({{< relref "../releases/5.0.3.md" >}}). - -## Upgrading from PGO v5.0.5 and Below - -Starting in PGO v5.1, new pgBackRest features available in version 2.38 are used -that impact both the `crunchy-postgres` and `crunchy-pgbackrest` images. For any -clusters created before v5.0.6, you will need to update these image values -BEFORE upgrading to PGO {{< param operatorVersion >}}. These changes will need -to be made in one of two places, depending on your desired configuration.
- -If you are setting the image values on your `PostgresCluster` manifest, -you would update the image values as shown (updating the `image` values as -appropriate for your environment): - -```yaml -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: hippo -spec: - image: {{< param imageCrunchyPostgres >}} - postgresVersion: {{< param postgresVersion >}} -... - backups: - pgbackrest: - image: {{< param imageCrunchyPGBackrest >}} -... -``` - -After updating these values, you will apply these changes to your PostgresCluster -custom resources. After these changes are completed and the new images are in place, -you may update PGO to {{< param operatorVersion >}}. - -If you are instead using the `RELATED_IMAGE` environment variables to -set the image values, check and update these as needed before -redeploying PGO. - -For Kustomize installations, these can be found in the `manager` directory and -`manager.yaml` file. Here you will note various key/value pairs; these will need -to be updated before deploying PGO {{< param operatorVersion >}}. Besides updating the -`RELATED_IMAGE_PGBACKREST` value, you will also need to update the relevant -Postgres image for your environment. For example, if you are using PostgreSQL 14, -you would update the value for `RELATED_IMAGE_POSTGRES_14`. If instead you are -using the PostGIS 3.1 enabled PostgreSQL 13 image, you would update the value -for `RELATED_IMAGE_POSTGRES_13_GIS_3.1`. - -For Helm deployments, you would instead need to similarly update your `values.yaml` -file, found in the `install` directory. There you will note a `relatedImages` -section, followed by similar values as mentioned above. Again, be sure to update -`pgbackrest` as well as the appropriate `postgres` value for your clusters. - -Once these values have been properly verified, you may deploy PGO {{< param operatorVersion >}}. \ No newline at end of file diff --git a/docs/content/upgrade/v4tov5/_index.md b/docs/content/upgrade/v4tov5/_index.md deleted file mode 100644 index 174be6527c..0000000000 --- a/docs/content/upgrade/v4tov5/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "PGO v4 to PGO v5" -date: -draft: false -weight: 100 ---- - -You can upgrade from PGO v4 to PGO v5 through a variety of methods by following this guide. There are several methods that can be used to upgrade: we present these methods based upon a variety of factors, including but not limited to: - -- Redundancy / ability to roll back -- Available resources -- Downtime preferences - -These methods include: - -- [*Migrating Using Data Volumes*]({{< relref "./upgrade-method-1-data-volumes.md" >}}). This allows you to migrate from v4 to v5 using the existing data volumes that you created in v4. This is the simplest upgrade method and the most resource-efficient, but you will have a greater potential for downtime using this method. -- [*Migrate From Backups*]({{< relref "./upgrade-method-2-backups.md" >}}). This allows you to create a Postgres cluster with v5 from the backups taken with v4. This provides a way for you to create a preview of your Postgres cluster on v5, but you would need to take your applications offline to ensure all the data is migrated. -- [*Migrate Using a Standby Cluster*]({{< relref "./upgrade-method-3-standby-cluster.md" >}}). This allows you to run a v4 and a v5 Postgres cluster in parallel, with data replicating from the v4 cluster to the v5 cluster.
This method minimizes downtime and lets you preview your v5 environment, but is the most resource intensive. - -You should choose the method that makes the most sense for your environment. - -## Prerequisites - -There are several prerequisites for using any of these upgrade methods. - -- PGO v4 is currently installed within the Kubernetes cluster, and is actively managing any existing v4 PostgreSQL clusters. -- Any PGO v4 clusters being upgraded have been properly initialized using PGO v4, which means the v4 `pgcluster` custom resource should be in a `pgcluster Initialized` status: - -``` -$ kubectl get pgcluster hippo -o jsonpath='{ .status }' -{"message":"Cluster has been initialized","state":"pgcluster Initialized"} -``` - -- The PGO v4 `pgo` client is properly configured and available for use. -- PGO v5 is currently [installed]({{< relref "installation/_index.md" >}}) within the Kubernetes cluster. - -For these examples, we will use a Postgres cluster named `hippo`. - -## Additional Considerations - -Upgrading to PGO v5 may result in a base image upgrade from EL-7 (UBI / CentOS) to EL-8 -(UBI). Based on the contents of your Postgres database, you may need to perform -additional steps. - -Due to changes in the GNU C library `glibc` in EL-8, you may need to reindex certain indexes in -your Postgres cluster. For more information, please read the -[PostgreSQL Wiki on Locale Data Changes](https://wiki.postgresql.org/wiki/Locale_data_changes) to learn how -you can determine whether your indexes are affected and how to fix them. diff --git a/docs/content/upgrade/v4tov5/upgrade-method-1-data-volumes.md b/docs/content/upgrade/v4tov5/upgrade-method-1-data-volumes.md deleted file mode 100644 index 01002d4d60..0000000000 --- a/docs/content/upgrade/v4tov5/upgrade-method-1-data-volumes.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: "Upgrade Method #1: Data Volumes" -date: -draft: false -weight: 10 ---- - -{{% notice info %}} -Before attempting to upgrade from v4.x to v5, please familiarize yourself with the [prerequisites]({{< relref "upgrade/v4tov5/_index.md" >}}) applicable for all v4.x to v5 upgrade methods. -{{% /notice %}} - -This upgrade method allows you to migrate from PGO v4 to PGO v5 using the existing data volumes that were created in PGO v4. Note that this is an "in place" migration method: this will immediately move your Postgres clusters from being managed by PGO v4 to being managed by PGO v5. If you wish to have some failsafes in place, please use one of the other migration methods. Please also note that you will need to perform the cluster upgrade in the same namespace as the original cluster in order for your v5 cluster to access the existing PVCs. - -### Step 1: Prepare the PGO v4 Cluster for Migration - -You will need to set up your PGO v4 Postgres cluster so that it can be migrated to a PGO v5 cluster. The following describes how to set up a PGO v4 cluster for using this migration method. - -1. Scale down any existing replicas within the cluster. This will ensure that the primary PVC does not change again prior to the upgrade. - -You can get a list of replicas using the `pgo scaledown --query` command, e.g.: - -``` -pgo scaledown hippo --query -``` - -If there are any replicas, you will see something similar to: - -``` -Cluster: hippo -REPLICA STATUS NODE ... -hippo running node01 ... -``` - -Scale down any replicas that are running in this cluster, e.g.: - -``` -pgo scaledown hippo --target=hippo -``` - -2\.
Once all replicas are removed and only the primary remains, proceed with deleting the cluster while retaining the data and backups. You can do this with the `--keep-data` and `--keep-backups` flags: - -**You MUST run this command with the `--keep-data` and `--keep-backups` flags, otherwise you risk deleting ALL of your data.** - -``` -pgo delete cluster hippo --keep-data --keep-backups -``` - -3\. The PVC for the primary Postgres instance and the pgBackRest repository should still remain. You can verify this with the command below: - -``` -kubectl get pvc --selector=pg-cluster=hippo -``` - -This should yield something similar to: - -``` -NAME STATUS VOLUME ... -hippo-jgut Bound pvc-a0b89bdb- ... -hippo-pgbr-repo Bound pvc-25501671- … -``` - -A third PVC used to store write-ahead logs (WAL) may also be present if external WAL volumes were enabled for the cluster. - -### Step 2: Migrate to PGO v5 - -With the PGO v4 cluster's volumes prepared for the move to PGO v5, you can now create a [`PostgresCluster`]({{< relref "references/crd.md" >}}) custom resource using these volumes. This migration method does not carry over any specific configurations or customizations from PGO v4: you will need to create the specific `PostgresCluster` configuration that you need. - -{{% notice warning %}} - -Additional steps are required to set proper file permissions when using certain storage options, -such as NFS and HostPath storage, due to a known issue with how fsGroups are applied. When -migrating from PGO v4, this will require the user to manually set the group value of the pgBackRest -repo directory, and all subdirectories, to `26` to match the `postgres` group used in PGO v5. -Please see [here](https://github.com/kubernetes/examples/issues/260) for more information. - -{{% /notice %}} - -To complete the upgrade process, your `PostgresCluster` custom resource **MUST** include the following: - -1\. A `volumes` data source that points to the PostgreSQL data, PostgreSQL WAL (if applicable) and pgBackRest repository PVCs identified in the `spec.dataSource.volumes` section. - -For example, using the `hippo` cluster: - -``` -spec: - dataSource: - volumes: - pgDataVolume: - pvcName: hippo-jgut - directory: "hippo-jgut" - pgBackRestVolume: - pvcName: hippo-pgbr-repo - directory: "hippo-backrest-shared-repo" - # Only specify external WAL PVC if enabled in PGO v4 cluster. If enabled - # in v4, a WAL volume must be defined for the v5 cluster as well. - # pgWALVolume: - # pvcName: hippo-jgut-wal -``` - -Please see the [Data Migration]({{< relref "guides/data-migration.md" >}}) section of the [tutorial]({{< relref "tutorial/_index.md" >}}) for more details on how to properly populate this section of the spec when migrating from a PGO v4 cluster. - -{{% notice info %}} -Note that when migrating data volumes from v4 to v5, PGO relabels all volumes for PGO v5, but **will not remove existing PGO v4 labels**. This results in PVCs that are labeled for both PGO v4 and v5, which can lead to unintended behavior. -

-To avoid that behavior, follow the instructions in the section on [removing PGO v4 labels]({{< ref "guides/data-migration.md#removing-pgo-v4-labels" >}}). -{{% /notice %}} - -2\. If you customized Postgres parameters, you will need to ensure they match in the PGO v5 cluster. For more information, please review the tutorial on [customizing a Postgres cluster]({{< relref "tutorial/customize-cluster.md" >}}). - -3\. Once the `PostgresCluster` spec is populated according to these guidelines, you can create the `PostgresCluster` custom resource. For example, if the `PostgresCluster` you're creating is a modified version of the [`postgres` example](https://github.com/CrunchyData/postgres-operator-examples/tree/main/kustomize/postgres) in the [PGO examples repo](https://github.com/CrunchyData/postgres-operator-examples), you can run the following command: - -``` -kubectl apply -k examples/postgrescluster -``` - -Your upgrade is now complete! You should now remove the `spec.dataSource.volumes` section from your `PostgresCluster`. For more information on how to use PGO v5, we recommend reading through the [PGO v5 tutorial]({{< relref "tutorial/_index.md" >}}). diff --git a/docs/content/upgrade/v4tov5/upgrade-method-2-backups.md b/docs/content/upgrade/v4tov5/upgrade-method-2-backups.md deleted file mode 100644 index 087aba959e..0000000000 --- a/docs/content/upgrade/v4tov5/upgrade-method-2-backups.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: "Upgrade Method #2: Backups" -date: -draft: false -weight: 20 ---- - -{{% notice info %}} -Before attempting to upgrade from v4.x to v5, please familiarize yourself with the [prerequisites]({{< relref "upgrade/v4tov5/_index.md" >}}) applicable for all v4.x to v5 upgrade methods. -{{% /notice %}} - -This upgrade method allows you to migrate from PGO v4 to PGO v5 by creating a new PGO v5 Postgres cluster using a backup from a PGO v4 cluster. This method allows you to preserve the data in your PGO v4 cluster while you transition to PGO v5. To fully move the data over, you will need to incur downtime and shut down your PGO v4 cluster. - -### Step 1: Prepare the PGO v4 Cluster for Migration - -1\. Ensure you have a recent backup of your cluster. You can do so with the `pgo backup` command, e.g.: - -``` -pgo backup hippo -``` - -Please ensure that the backup completes. You will see the latest backup appear using the `pgo show backup` command. - -2\. Next, delete the cluster while keeping backups (using the `--keep-backups` flag): - -``` -pgo delete cluster hippo --keep-backups -``` - -{{% notice warning %}} - -Additional steps are required to set proper file permissions when using certain storage options, -such as NFS and HostPath storage, due to a known issue with how fsGroups are applied. When -migrating from PGO v4, this will require the user to manually set the group value of the pgBackRest -repo directory, and all subdirectories, to `26` to match the `postgres` group used in PGO v5. -Please see [here](https://github.com/kubernetes/examples/issues/260) for more information. - -{{% /notice %}} - -### Step 2: Migrate to PGO v5 - -With the PGO v4 Postgres cluster's backup repository prepared, you can now create a [`PostgresCluster`]({{< relref "references/crd.md" >}}) custom resource. This migration method does not carry over any specific configurations or customizations from PGO v4: you will need to create the specific `PostgresCluster` configuration that you need.
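- -If you are migrating a PVC-based backup repository, it can also help to confirm that the retained repository PVC from Step 1 is still present before drafting the v5 spec. A quick sketch, reusing the `pg-cluster=hippo` label selector shown in the data-volumes method: - -``` -# Sketch: verify the retained v4 pgBackRest repository PVC still exists -kubectl get pvc --selector=pg-cluster=hippo -```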
- -To complete the upgrade process, your `PostgresCluster` custom resource **MUST** include the following: - -1\. You will need to configure your pgBackRest repository based upon whether you are using a PVC to store your backups, or an object storage system such as S3/GCS. Please follow the directions based upon the repository type you are using as part of the migration. - -#### PVC-based Backup Repository - -When migrating from a PVC-based backup repository, you will need to configure a pgBackRest repo volume (`spec.backups.pgbackrest.repos.volume`) under the repo named `repo1` (`spec.backups.pgbackrest.repos.name`). The `volumeClaimSpec` should match the attributes of the pgBackRest repo PVC being used as part of the migration, i.e. it must have the same `storageClassName`, `accessModes`, `resources`, etc. Please note that you will need to perform the cluster upgrade in the same namespace as the original cluster in order for your v5 cluster to access the existing PVCs. For example: - -``` -spec: - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - storageClassName: standard-wffc - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -``` - -#### S3 / GCS Backup Repository - -When migrating from an S3 or GCS based backup repository, you will need to configure the appropriate `spec.backups.pgbackrest.repos` entry (e.g. `s3` or `gcs`) to point to the backup storage system. For instance, if AWS S3 storage is being utilized, the repo would be defined similar to the following: - -``` -spec: - backups: - pgbackrest: - repos: - - name: repo1 - s3: - bucket: hippo - endpoint: s3.amazonaws.com - region: us-east-1 -``` - -Any required secrets or desired custom pgBackRest configuration should be created and configured as described in the [backup tutorial]({{< relref "tutorial/backups.md" >}}). - -You will also need to ensure that the "pgbackrest-repo-path" configured for the repository matches the path used by the PGO v4 cluster. The default repository path follows the pattern `/backrestrepo/<clusterName>-backrest-shared-repo`. Note that the path name here is different from the one used when migrating from a PVC-based repository. - -Using the `hippo` Postgres cluster as an example, you would set the following in the `spec.backups.pgbackrest.global` section: - -``` -spec: - backups: - pgbackrest: - global: - repo1-path: /backrestrepo/hippo-backrest-shared-repo -``` - -2\. Set the `spec.dataSource` section to restore from the backups used for this migration. For example: - -``` -spec: - dataSource: - postgresCluster: - repoName: repo1 -``` - -You can also provide other pgBackRest restore options, e.g. if you wish to restore to a specific point-in-time (PITR). - -3\. If you are using a PVC-based pgBackRest repository, then you will also need to specify a pgBackRestVolume data source that references the PGO v4 pgBackRest repository PVC: - -``` -spec: - dataSource: - volumes: - pgBackRestVolume: - pvcName: hippo-pgbr-repo - directory: "hippo-backrest-shared-repo" - postgresCluster: - repoName: repo1 -``` - -4\. If you customized other Postgres parameters, you will need to ensure they match in the PGO v5 cluster. For more information, please review the tutorial on [customizing a Postgres cluster]({{< relref "tutorial/customize-cluster.md" >}}). - -5\. Once the `PostgresCluster` spec is populated according to these guidelines, you can create the `PostgresCluster` custom resource.
For example, if the `PostgresCluster` you're creating is a modified version of the [`postgres` example](https://github.com/CrunchyData/postgres-operator-examples/tree/main/kustomize/postgres) in the [PGO examples repo](https://github.com/CrunchyData/postgres-operator-examples), you can run the following command: - -``` -kubectl apply -k examples/postgrescluster -``` - -**WARNING**: Once the PostgresCluster custom resource is created, it will become the owner of the PVC. *This means that if the PostgresCluster is then deleted (e.g. if attempting to revert back to a PGO v4 cluster), then the PVC will be deleted as well.* - -If you wish to protect against this, first remove the reference to the pgBackRest PVC in the PostgresCluster spec: - -``` -kubectl patch postgrescluster hippo --type='json' -p='[{"op": "remove", "path": "/spec/dataSource/volumes"}]' -``` - -Then relabel the PVC prior to deleting the PostgresCluster custom resource. Below uses the `hippo` Postgres cluster as an example: - -``` -kubectl label pvc hippo-pgbr-repo \ - postgres-operator.crunchydata.com/cluster- \ - postgres-operator.crunchydata.com/pgbackrest-repo- \ - postgres-operator.crunchydata.com/pgbackrest-volume- \ - postgres-operator.crunchydata.com/pgbackrest- -``` - -You will also need to remove all ownership references from the PVC: - -``` -kubectl patch pvc hippo-pgbr-repo --type='json' -p='[{"op": "remove", "path": "/metadata/ownerReferences"}]' -``` - -It is recommended to set the reclaim policy for any PVs bound to existing PVCs to `Retain` to ensure data is retained in the event a PVC is accidentally deleted during the upgrade. - -Your upgrade is now complete! For more information on how to use PGO v5, we recommend reading through the [PGO v5 tutorial]({{< relref "tutorial/_index.md" >}}). diff --git a/docs/content/upgrade/v4tov5/upgrade-method-3-standby-cluster.md b/docs/content/upgrade/v4tov5/upgrade-method-3-standby-cluster.md deleted file mode 100644 index 165d65a883..0000000000 --- a/docs/content/upgrade/v4tov5/upgrade-method-3-standby-cluster.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: "Upgrade Method #3: Standby Cluster" -date: -draft: false -weight: 30 ---- - -{{% notice info %}} -Before attempting to upgrade from v4.x to v5, please familiarize yourself with the [prerequisites]({{< relref "upgrade/v4tov5/_index.md" >}}) applicable for all v4.x to v5 upgrade methods. -{{% /notice %}} - -This upgrade method allows you to migrate from PGO v4 to PGO v5 by creating a new PGO v5 Postgres cluster in a "standby" mode, allowing it to mirror the PGO v4 cluster and continue to receive data updates in real time. This has the advantage of being able to fully inspect your PGO v5 Postgres cluster while leaving your PGO v4 cluster up and running, thus minimizing downtime when you cut over. The tradeoff is that you will temporarily use more resources while this migration is occurring. - -This method only works if your PGO v4 cluster uses S3 or an S3-compatible storage system, or GCS. For more information on standby clusters, please refer to the [tutorial]({{< relref "tutorial/disaster-recovery.md" >}}#standby-cluster). - -### Step 1: Migrate to PGO v5 - -Create a [`PostgresCluster`]({{< relref "references/crd.md" >}}) custom resource. This migration method does not carry over any specific configurations or customizations from PGO v4: you will need to create the specific `PostgresCluster` configuration that you need.
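- -Because this method hinges on both operators reading the same S3/GCS repository, it is worth re-checking the v4 cluster's backups before filling in the spec below. One way to do that, a sketch using the v4 `pgo` client referenced in the prerequisites: - -``` -# Sketch: review the v4 cluster's backup repository before configuring the standby -pgo show backup hippo -```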
- -To complete the upgrade process, your `PostgresCluster` custom resource **MUST** include the following: - -1\. Configure pgBackRest to use an object storage system such as S3/GCS. You will need to configure the appropriate `spec.backups.pgbackrest.repos` entry (e.g. `s3` or `gcs`) to point to the backup storage system. For instance, if AWS S3 storage is being utilized, the repo would be defined similar to the following: - -``` -spec: - backups: - pgbackrest: - repos: - - name: repo1 - s3: - bucket: hippo - endpoint: s3.amazonaws.com - region: us-east-1 -``` - -Any required secrets or desired custom pgBackRest configuration should be created and configured as described in the [backup tutorial]({{< relref "tutorial/backups.md" >}}). - -You will also need to ensure that the "pgbackrest-repo-path" configured for the repository matches the path used by the PGO v4 cluster. The default repository path follows the pattern `/backrestrepo/<clusterName>-backrest-shared-repo`. Note that the path name here is different from the one used when migrating from a PVC-based repository. - -Using the `hippo` Postgres cluster as an example, you would set the following in the `spec.backups.pgbackrest.global` section: - -``` -spec: - backups: - pgbackrest: - global: - repo1-path: /backrestrepo/hippo-backrest-shared-repo -``` - -2\. A `spec.standby` cluster configuration within the spec that is populated according to the name of the pgBackRest repo configured in the spec. For example: - -``` -spec: - standby: - enabled: true - repoName: repo1 -``` - -3\. If you customized other Postgres parameters, you will need to ensure they match in the PGO v5 cluster. For more information, please review the tutorial on [customizing a Postgres cluster]({{< relref "tutorial/customize-cluster.md" >}}). - -4\. Once the `PostgresCluster` spec is populated according to these guidelines, you can create the `PostgresCluster` custom resource. For example, if the `PostgresCluster` you're creating is a modified version of the [`postgres` example](https://github.com/CrunchyData/postgres-operator-examples/tree/main/kustomize/postgres) in the [PGO examples repo](https://github.com/CrunchyData/postgres-operator-examples), you can run the following command: - -``` -kubectl apply -k examples/postgrescluster -``` - -5\. Once the standby cluster is up and running and you are satisfied with your setup, you can promote it. - -First, you will need to shut down your PGO v4 cluster. You can do so with the following command, e.g.: - -``` -pgo update cluster hippo --shutdown -``` - -You can then update your PGO v5 cluster spec to promote your standby cluster: - -``` -spec: - standby: - enabled: false -``` - -Note: When the v5 cluster is running in non-standby mode, you will not be able to restart the v4 cluster, as that data is now being managed by the v5 cluster. - -Once the v5 cluster is up and running, you will need to run the following SQL commands as a PostgreSQL superuser.
For example, you can log in as the `postgres` user, or exec into the Pod and use `psql`: - -```sql --- add the managed replication user -CREATE ROLE _crunchyrepl WITH LOGIN REPLICATION; - --- allow for the replication user to execute the functions required as part of "rewinding" -GRANT EXECUTE ON function pg_catalog.pg_ls_dir(text, boolean, boolean) TO _crunchyrepl; -GRANT EXECUTE ON function pg_catalog.pg_stat_file(text, boolean) TO _crunchyrepl; -GRANT EXECUTE ON function pg_catalog.pg_read_binary_file(text) TO _crunchyrepl; -GRANT EXECUTE ON function pg_catalog.pg_read_binary_file(text, bigint, bigint, boolean) TO _crunchyrepl; -``` - -The above step will be automated in an upcoming release. - -Your upgrade is now complete! Once you verify that the PGO v5 cluster is running and you have recorded the user credentials from the v4 cluster, you can remove the old cluster: - -``` -pgo delete cluster hippo -``` - -For more information on how to use PGO v5, we recommend reading through the [PGO v5 tutorial]({{< relref "tutorial/_index.md" >}}). diff --git a/docs/layouts/shortcodes/exporter_metrics.html b/docs/layouts/shortcodes/exporter_metrics.html deleted file mode 100644 index a69cd351a0..0000000000 --- a/docs/layouts/shortcodes/exporter_metrics.html +++ /dev/null @@ -1,17 +0,0 @@ -{{ range $metricsfile, $value0 := .Site.Data.pgmonitor.general }} -

{{ $metricsfile }}

- -{{ range $query, $value1 := $value0 }} -

{{ $query }}

-

SQL Query:

-{{ $value1.query }} - -

Metrics:

-{{ range $key2, $value2 := $value1.metrics }} -{{ range $metric, $value3 := $value2 }} -
{{ $metric }}
-{{ $value3.description }} -{{end}} -{{end}} -{{end}} -{{end}} diff --git a/docs/layouts/shortcodes/pgnodemx_metrics.html b/docs/layouts/shortcodes/pgnodemx_metrics.html deleted file mode 100644 index 919aadd428..0000000000 --- a/docs/layouts/shortcodes/pgnodemx_metrics.html +++ /dev/null @@ -1,17 +0,0 @@ -{{ range $metricsfile, $value0 := .Site.Data.pgmonitor.pgnodemx }} -

{{ $metricsfile }}

- -{{ range $query, $value1 := $value0 }} -

{{ $query }}

-

SQL Query:

-{{ $value1.query }} - -

Metrics:

-{{ range $key2, $value2 := $value1.metrics }} -{{ range $metric, $value3 := $value2 }} -
{{ $metric }}
-{{ $value3.description }} -{{end}} -{{end}} -{{end}} -{{end}} diff --git a/docs/static/Operator-Architecture-wCRDs.png b/docs/static/Operator-Architecture-wCRDs.png deleted file mode 100644 index 291cbefef3..0000000000 Binary files a/docs/static/Operator-Architecture-wCRDs.png and /dev/null differ diff --git a/docs/static/Operator-Architecture.png b/docs/static/Operator-Architecture.png deleted file mode 100644 index aa8a43a134..0000000000 Binary files a/docs/static/Operator-Architecture.png and /dev/null differ diff --git a/docs/static/Operator-DR-Storage.png b/docs/static/Operator-DR-Storage.png deleted file mode 100644 index 7bab1bc27c..0000000000 Binary files a/docs/static/Operator-DR-Storage.png and /dev/null differ diff --git a/docs/static/OperatorReferenceDiagram.1.png b/docs/static/OperatorReferenceDiagram.1.png deleted file mode 100644 index ed2b7164e6..0000000000 Binary files a/docs/static/OperatorReferenceDiagram.1.png and /dev/null differ diff --git a/docs/static/OperatorReferenceDiagram.png b/docs/static/OperatorReferenceDiagram.png deleted file mode 100644 index ed2b7164e6..0000000000 Binary files a/docs/static/OperatorReferenceDiagram.png and /dev/null differ diff --git a/docs/static/crunchy_logo.png b/docs/static/crunchy_logo.png deleted file mode 100644 index 2fbf3352c1..0000000000 Binary files a/docs/static/crunchy_logo.png and /dev/null differ diff --git a/docs/static/drawio/crunchy-postgresql-cluster-architecture.xml b/docs/static/drawio/crunchy-postgresql-cluster-architecture.xml deleted file mode 100644 index 2ef12a4f6a..0000000000 --- a/docs/static/drawio/crunchy-postgresql-cluster-architecture.xml +++ /dev/null @@ -1 +0,0 @@ -7V1Zd9rK0v01eTys1sTwiAEn5FpyCBAHv3wLC4IF2PiCHJB+/Ve7qsUo23jAB/vKOesAQkN31a6qXdUDX6zKzeLrtHt37U56/fEXU/UWX6zqF9M0bNP8gv9UL5IjxbySA4Np0NMnrQ40g7ivDyan3Qe9/mzjxHAyGYfB3eZBf3J72/fDjWPd6XQy3zztz2S8+dS77qC/c6Dpd8e7Ry+CXnitjxpKrb741g8G1/rRRUd/cdNNTtYHZtfd3mS+dsiqfbEq08kklHc3i0p/DOElcpHrTh/4dtmwaf823OeC8rxr+9Pynx8Xp8WC+39upXjS+Me05TZ/u+N73ePgdhZ2b/3+F9zP0uqbhVEikcF0cn/3xTq560+Dm37Ynzbvun5wO6Bvi3R4Fk4no35lMp5M6cjt5La/PJgI0KIjf4LxODmJHvPHwT8cn9yGa8cLBv7RcXroba+PruDTUpaKPgzG3dlMv5+N+qF/rT8QJsJucNuf6s+492n3JhgDi5XJ/TSgr0zl9ef6y6buZYps9aG//WnYX6Qhr3uVCGilXLKK/oRkNI3oPH1VSUtU24NlFuXzfIUu09KQuV4DlpkAq6sRPVjeeqV0eqP1no6B6p+z76MLt1f5z+jrxXjYHH+7KfyzVPKDIDAyELwtCCznaRTYpfdFgVPcQcGO2jcVML8Owj4Uj2/n5P/p2HV4M9ZfszDFoRd3lar4bz+sGLtYOeU/KH7a7QX91a31DZLD1WBKegkmt/zVFLc72V+xDyowb1o5q7CpQ2dXh0apmDPzKWo0DqVGI8WY30yNRn5Xjyf87+V6NOif+bAe30BXdhKBtaIMy9lVVDHF2AoHs7WCs6Mkf3p/619H/9wNrkg7PpzSW2lN7WptaT6bWqNvSvy3j+qUKpgnhcOqztr0k4adorrk2Lrq7EOprmilmFd+HGoRQ2MJh83/9x7c7sRYvQUDFY2tjm3GoPVTV7pfHVx7N9Cv/PCr1PPRpH+kQWU6oXS3SL3Jj1+V5D4klKvte9Mx6VtyeAuYFIjv8NaPxgEhdGo9Dc8rwfLZ1fJA1x8NGOHn9yHdpp8EeY1h59mYfgVyd4xiees3wHSx4OTMTVjnjV1Y2ykO6WDBv7ibBmyhen/Y7uBSDvS6YZdOzqD2jlBz8oUtnKkdnOVTcJY/GM52494eDuyPputlzkby3Rto+AEYbnyZ6uyeAmKGuFcgzlLHhrh8hrhPjTgnn8s7x4W5pMRzwGg67d+NAx+oqjJT7E//BqjW6NOmOxfuHHlJ9H2HDGTfvHFZEzpc8mFuJfhmWvJhpgDrYMmHYTzszd4KWSl56FED5uhSVnsrZXX+7ZTVMNIqe2+LGpLnn+5td9cVHSlqjsjNFAt2ziptYkalYCZtUOBwmEn82sOY8ZdSWqEjkdfLCh4CB5c6EPgzhLbu7Ppq0p329gUOSTzcRMeOr1gqdF3RWpndcTBAndgnZWK84ARKpCg7LusvboJeD89KxeQmap+omi6r3y8ccXgYTVsVs5Ra57L4vI4k63BIMv4tJNVv/ckNBqVMVb5jvsQDAab62f/vfX8WzjJc7Z/YFZ7ElaXeF1dptYQtxfV7g34ihVl/cEP9r60OnfRve2XMEYBuMH4Y+JtaXolePUP0s7A7TU605Clrn3rk1JbafFAxM0K2BvtDQV0Xoulhg364T/iHMB5V9ZounUeqj9P+mAzpb3+jwWn61U/4MQnYzBMPld9EkpkQpOQW0nd91QolOzcqGVs3Uls3EtHs3Ijhtuz2axBY+N9GYP6ocGWX3ghX+eLWjez9cEWK7EZrp93hhNnDDd5+jqFHBVcwlTu+MWj3GOb/xKBNzjxS0BovBa2zPVJqFd/XGVppCcNnwdWRoMXZ
inj5l4LF+LfBkpYTZGA5qGt5KVh2XNS7g+Vj0qz+Igh/4445I+/ozx3cMqeKBf25utCP5A/R2ocfycRKfeyt49+TaUOi9SNB83Z8M42X+r5t1rVtF2/E7pYz37dqeg+1y9xul3oHNlj8BGRwaWdrNqZyBedxE7slwYl1OsnHzvp3q8v408Z127b5oDk+aWTJvJsjMTKe1Zovrf42EOkYpdxLjS6ZSpnEIrVVFnojo3NKxe0mH96IzNKT4ysfcE5g08rmab3XHIaS6eSSq45lSqCZWP+nQvXXSjOD9bvBupDPWdZxwXo5SPGpYF2+bP+sZcB+L2AbaisbSYO1VVgO6781stOXcu4xHpat31pq0MmpTR0+a/2WKuSSAbi3j7z5J/nkviPmTzoUw9jxKK+ZDLSXx/sxmYWDab/ZOKMLflAq1WXN/Jj0XuPBnhqj17g64PD8o6ngM5YW2pu5kmMbuWLKfKBCStikDM0uHAyWryWEj4PjjXG3Z+Rcm+5I4Wvan4XvOnNtffUz/+34TvVI7EtF+FtA0NmEoJ0M7mzMYUyb7XFI/BUenkj0blP5twH/atiJL5z901/cUXTbmG27ibQ9Fg58HvTlSvZmbKZMIom3/yoG83uMcsreCHvL4dmbB+Qt4i7KtlWpZBWKxa26sV10cmkLnAsURfL0YpUMS9mWXUyhpmYufzhSs8eQ33EC+AkkPI3rp6L2YeVuvjJqH5oObrvCj08CH0pqt5PfA0AtqYPbuVLJNotOwZD/5zd8RIlcQdGgVFT/GbvuQpm51dX0/5QkyKJTyAMt76Ls12M4PbN92nMk9QtKJjCSstTeWfeqP6aEI9Ap5NUkDCc3KeoNJ9AjX16e3clOUtBRN/nwJ1hAwSf6CVVe1GeV5aN5Ovs7+GKeLAh3ZuXHN8+8jE7sq4vFvR/f2Vdfx/fdWAXdbz+VX538PbN6Vi9yLDdy/vo3/l+3NXLOmyU5L6oP+l+N2dWtW6rfXKvet3L+LCrRFf59L3bvr6zvt2dxfe5Wy3996/K2HpyY3YtfVuOmZP9o1uf1anngxiP7bDgy61XXOG/aphvUB5dfT1Wnadx3Lozxj+Z3t/N7PPaD8oKuv7usqqD19dS5/P39j9usUz9+fP05vrx18e6bp/oXi/GPwBlefXND93d4071YzM4Dd+4Ny6Zb7RT+NPV3t8l33o130TEuh+3Vd5b+7vZXdFUZDNHXy4vT6OyiF3Uuft5dXjjqP62w9KNSinvfxrPLll1Mzvlpfb++/DoeX902BpfL9z9/Xd2MlU9y7f32xv7Ym3cuvHHLdH75ZjuofxtFJI+F1+rcu8MBfR7gc+xV/Xs3ruP7xdb3+Ky8ir1wSQfdiwbJf3F3dfPr2g+MW//mdERyvu9V6loqV1Z54Fs///oVY3hlLv76QxXUSTPnrctRp1kfXJmXN775S7FWguKjV5VJlr3rrasW+qpKada9cMad39+/LVtRtYtn5kpHJ9e9r4MB67FVn6MP58059bV871Z7w7OWb5wN29FZ0zbcplp41dr9ebUWetUR+h66rU5E3y/OWjW6zlaechdeQPeozBdu3Lh3W7WZNyQ5tTozr9q592J/5rba6mxYs86qdbp33ejEHfNs2IjoHvRaV2fVWuRG9LxgHtF787xim9Qm+mzHbsSvkUevdDzy6Di12aJzqW1luje1e1inV8Yx3bNOz3HpOX581urEdH+6Z9vk11Zt7kW26QVzwrk99wJF5zVwLn9H1xv0fJJDG/2896ruzKU+nrfdyCUseBUtp9Zg5g7LuM6hZym3QveK5nQO+j+akRzoXmXrYqQW5y1q25Cubf0k2TbotT1zK8oheUDGeC61keRRLYcuZB5xu0hedG6V5B2xfkjmAzqH7hXZNrVfidwGM+qPos8LaoPp8XmNBc4jnUSkk/kZ6c2r1kP3gtpH8iT50OdfdXfYMDuRwnXKjVgOoTusoc8h3TPyKiqm8+meiuRadhqEVujZJazQNfSMGvkhfkZExw3q/3pfu1dNZXloE33vXpBNk8zofLIXvm/ExwPboeewHVEfYtKvRe0j2dVs6gPprGPQZ5KVT31tW2dDN5LPZZLFaM56wzlVOj50GQteq0H4o37EJOcYmB7NzqG7mHDZZBwRPqDrDl5jxkyVsIl7UZ8I24KBoQt92JA32kzyQTtCkgfdaxCKTqiNjGHci54xJDkPXXpl3KBvEdtS1b0/b5Fcm3M6n86r1mbUd8I1vh9BX2hvSHIg/aDtdTrXJvkRfirQjW+ftVzRLT8PcqD3QxfHqW0/r13ojGTIbYJdQPe4F2wW9zLd8By2XkFfOrHgvb6QV9yHcMLPoxjQnC9YDq0aPXtAuoAt1EKXZMgyrnZC2LYbf792Ww2xh+SVMceyVtQ/kmtvSP1S8ty6DTnRucAY2eAcNou2RGJLwCZ/NydcQd+Obh/1qUaYHyn53GCZerGL59F5DZNsac4+qMn2Qm1o35/zs9A20meggFmD79VqE05qsCWT9VltMDbI38XcnuoAfgw6MLyay37NA45a8GUuPa+zEPyVHWDIY4zMbcI54aYGOSv4ipUMGWuwdQfvOy3CMvCc6AnfDdukgwH16fuQsEE2DmxQv6oNsckm+ULYfQt++PrEhb4I68tXLXfyW/BJ80ZcU2RvNskE9uZApxTLSCYkj5ie04K91ETG1Qbp4Vf3qtUh/cCP+maHzjlnf4bz2vTM0Yxttsm+XtsgdMF2Db+8YHnCR1YS7JOsqg2Sw2WV2kntq8P/Es8g3x3jnvCjNYknpBuSG9oZua1Tji/sk5oKfsKG33Jbv8iHkm8YdhyxE8J27KNtkE8EW4HfI3uNJL40EL/JBtqsS8F1x2IfSLhgfzBybdiyO+wA29TWAewRfVhwXIJvo7jkEp9yq+OhCx0xloCDGuyPfY0HTJENU5/JV9UXbPNVuifaF3dC+FJ6voKP86h9LvoaoD0N9smQL8kv5hgRzU3ue2Bb57/J1qGHmDAcj+DXbRe2DJsmnwtcACukF5N9MfxAUyl89sQ3a/9FbW1ybDHZBoYd3MOg6x3GfUXkdwY5QJZN2G7dkX64iEMWYjGuo2cA9yvfwf6TMAqfhjYhbiC2BNzHGP06h29psl0YHC+JWyztmK4Fjjy0OWIfpthPVK/Jv3SU+P+aibjvMc7ZbhArLdKNAxn6rTZ0rsRXjBS3veXSM2qLTgw/2IEf5LiDPlDblMd4GBgs8wpibkf7ohrORZvBS2C7JIMRYa2d+G6SNWI4cRzEzyb3DVwlEi5RlzZF6GPia2tsu9rPW9Ah9VVJjINvQ3z3JeYPgdmOcJt4hLYCo3Svso344YJDMFdHbOYYxP6K+UMAXRP3qcIX1df0RjZO/SRdAkPADDCxkBjYERkIj4LPBDbmjK8K8K/gV3Ef4IzkpiL2eWQ3HL/Jz+BcsQfynUNun6FjI8c3Okb9ouezHTe4rZAZ7IdtL8BnjhGWH9eF0zWZ62g/QufFl0N6RpzEZ3AsiRkj4EH6DT8qcRx9R4yJ2R9HwD3pgnkE/LIrchyCd9E5TbZ
Put7FOeBnhnAB3F/Luwk5uoZglmIGt43iFsdIl3myKzbAPFf8zwgxg+O/8EbEenofQ04+8yXyLSHlR+KTI3Ap4t1kxy7Hc9gZYnyb4xZs0WNON4K9xcAe41peHfg+8fccy0mHCr4sIg4bI0Z73J4y+uXQ9RbZgM0+BL4iHiFmEn8CTuBr25ARnhMxBirww7AP2Bb8cF1z+BowTvognr/SL90b+uQYDh7Gx8QnUMyFv4NPjNS9xMqB4CSCPYPfdBiL5ENDjj/wpeBkwZz8I2y1DZ+w4kqI48PTGLjymGuUwSFJ7o2Q+/8VPo/wwrGsNhNfpBaaT1vMw5h7Qs6IuYzJheC5PpOcI+HL3EZwkZlwOPjAtsQZkhHH6aELvYXshxEvuE/oh8+2gDjofa0tznUc0K/i2xhnHLOsDmyB/Qj629bvfeZRXmsUcg5q1hAH5hxX5VX7SPYPLP/z6vVQ26MjvA+YaiAHIT+twBXY17ucEyFngs4ge41LkgUwBk4v+B44ndYI+KTrklf2ywvhOW1T+xanw/F44LC+hFfJsxBLKuxHLJ0HzrRPjyUXa3O+5IkPMzXnuOdclM9px2fwIcTfgC3olH11qy6+kDk65VaI97HwPR99aGo+FLBfiCXeEf5h4xc12EsMX6BftU4UYoeNGNPheEr6kHwGPACylhwMOgRmkAtzHol80Wf/5nFMBb9EPCA/NRxQezlXWIBjEza43eRLbdiNH+n8FHbRqiX8nXkI858q+/yFcC3mwKb4f+S7/kJyPfI5kD+wg9xUfLXJGKBnwt5xjOO5cHGSFWyvI3gOmAeFzL3RjmbCS13GH8fJ367ktZIvcs7FdmWCNwArvikxjm0Wz0GuKbxc+oxcR+JVwLxlzv4ZbW8KX9IcQMeYjsThIeTiS74aMc+wKBs33QpzZcpHR5zDcz42LC8kx/OV5uPO+YWLuL2Q2N+ALmzGGLgc+C5iLfxzxPl1rLlJzPwu4S7NueSzEqcM4fI+y5EwQM9tg4OG7NcJszqWmrB9T8d1/ZrwGkv8FO5DPpj1CFtn2Vjis4CjAZ6BvNUmru+AyzAH5TwYuQn6UzPED5MsoH/2/Q3YmnlxozgGEYemvKbGvICeTfLsCbZiP+QaBfgt+VCOi/BfcU04LjDe6oS6rSbjucn5eyS+XPCs6y22+D/4Az/Jt9mvM0cGHiJwkjniHvR9z5yc+gs7Zd8LOcNuTcJTlXqZcPhIeBzzB8kFDbk/fFxHeETAOpa8eSg+DXxO+zMD3JS/ayZ5FzhhTXPCuo5pl1I70bFNODtzjOj8oib1AeBBuIjkYuDvTcnxOzFi6iCUegXiGvN7zps8rnepe6nfQCZsP+C35KsQS8Dd4NdORT5odyCcCz5LfLnPPNcFfwQvIN8mfAr2wviL3ZG7AKbP4c8Rl9mma0l9jXKckfiLViPhbeBF8/Nfdx7lanPJ09pGp+XajEVwK65ddFZ5/7DDn8HzPOIc7KuhH+a0yD3BPTrgxfAnNtkV/LTh/ZoE9a/erPPbi380v0sduFn/+2O4mHd+/5zUvzZKddTTtnDcQR5J/IxrDE3E5Y7k71x3RH4MH+jPdFxUEjsHOncuM/fU+TvL9vwhu2TfXZY8fsh56Hz5LObsrq5DlhGjGcuSgwlv8irMj1HPWnAsBGfkmk6d4xPlFiFzb8o9wM3PubasJPdBvYB4Mcc6zos7UiuBX2GO2eYaAXgm5w/se3zNnRBzOU8xNDekODSX+wluYvYV7AOYc8ccT6qu9EvyTFPqXwPELlv7AeTuofbh4MOcX0lO20YOdM+6qrJt257lMjcWe+UYxXzAjcdSF42ZY+HZTsJvzgXnJtfN8CzcB/xH+sy2J+32maNKfi+5m+RiNUO+bwuvBu+MxS7l+zpwoTm+O5P4Sjj5ndTaNNY4JiuOycJl4BNgyw2dAxMPqfqa45fZL2l/P5d6Vz3UudZcar0n18wP0JbkNclrSY6cr3xjTse5JfyA9n/IyzVPQW5b13HZTfI/rgFIzHDvub/iZ+bih8SPCG9kHulI/QJ9abO/JPmEUif2OU/xpE6+4FpXi3mWIxwROWZDxgYQf7/WHF2rTl6TeLbgOkPA/TAp5iqtS+TyjshQ8lLOT4Zc60rsT9cNUQMBVpm3zuBTxLd9P5HcYDRbvia+V2pnBvsI0RnbDHDjab7HGOTYitqW8OhUPs51kfaM+bhwcZ13+oxBzYcR/yKxrbbStUxtT5zfSByhnE9yBCU5AufGtZB5dSD1YWq7rrNynScW3wGMde4Z/8wba5KLsj9Hf4E7rnEwB5b7A3PMGxy6B2KZ7ceu2ECTuS30iJoOxQ6204X2g3GiLx1j5lxjihTX2qQGmOQ0sHOuHescrsZxUXBeZx7M3BA1IKlhOFLn0GMZFa4botYA/2Kxf5H8w9S5vin5NXIzjrFKavYYYyEZVBEH8CyM9fgW8w727fAtUvvl+MS8CL7aF5sjfnfOdcq55AP8fLou1jri2v8ceRHiqOK6GPgm8wcXOatif1bhWgj8jemKj11o7iX+N+DxGUP8bZtrI/BzbI8SH2KxLeFZUkNVpvi4Tqjru2FiC5qvxpqPSZ2Cff+I+TLXDZocP+biN1F/0jUPGWOKeLyFbYLHoiQGNCW/A/9wBdexcCHXYi4ea/6jayHnMrah8zjijMIzbcl7ayJnziH8hOfguY7ky1yj0Vx7xPFZxg4xBtUg+26LT+fxMOY/hvDhAeldZKLHk+bSjzrnhRyj5VXqXVXtV5rsP4WXM3/gfAw+yl5ei3HLSOrMbE/MpTt6HIa5qKlrILFcV+N8HnmIrrMYUvsdST0YnNVy51KLGojsYoyPob/eUOpevsZnTWqnqDNHUtORfBnjmBjn8Q3Ja2FXNZG52Lot/PDnQsaDOBbocSFdA63qGmhrWT9doL7jxcsxg9DjGMD1YMV1SbZt8ctcN13GbMQ9X3Am41e61jtf6JqTrvXOda1X6VrvXMcejBlx/dHU9So9Fof2cg1ZsMWxCj4UNtCQ3Ek4C9sJ65RjjOLYyfVM2B1kzzLiMbX5GWo1AcdN5g1cuyQ8aV6ma+M8JiB1O87hOuBbodStkB97sJ/ZmeTC4teZs9RERoxjYL5uif+pC8dsYUyFP89lXBrntdmWZCyN67ihjEe0tR/mmpKt9R1izIG4BI+HoebF4xVS61eCU67Lo266EJ4IfcnYotTyGdu2jJvVl+MRHp9Hdsb5Edd2lHtbWyxjh+RettQdfg1d1hvXCQzNQxYyVob+c4x1RB6ouZG/xhgJ169dzs+F43B9AHpS8HEYT0a9BFjx4C+QGwhm4fOVp+PFTt46ov4T/s7ZHr4PGY/MX+BDMcegocdhazJ+zzV0qZOsxltqMv7HORTqhuKT9dj/LKl16zqKJfVe6ErqT7q+pJa1acSNSF/PdWGOo4aud+i5D4hJkL1whnOO6XWp0SA3aiI/rUv7wc8icD+Ol8YZ8xUeU4l0+yWfawtH9KTuh9eEc63LCGPxNvw+xV0eS0cuyjk29fW8osfWOD7WZPxV6hm61rLMCyLhXJ
znR8IB3ciX2l4k9agB8xjUqLwbN7GphdSJdf1JYqaVjHtr7m6hHYid4IXCE7gWped0NNgOXPH3CxnDrXN9zask8yXqMq43vNa1X9Rm2P65ntmJktyEMSqYYv3xvBSSE6FFcjrmHlxDgl9DrOF6GGItxvJr7Df4HOhkfBdKvQ/jQ2U9z8MV3oNx8mgudbQI8RttKXMed86cUNeNq7oWNpQcROeOSsaqRpHYBGyVZFWZ67ks19TeemLjsa47c57tIs7I2BfX9c4rgiUeQyQ81IOT4dXX0xjzoTrmArEpOIvtov/1VHUrJyPKv72dGsAFYhHm7bgy9se1lIHUWjh+cr1cCT+Zax3Axrifkc/5M/M7k3mijIvP2MfFp0OppQvPZH1ITc4We2N+bAs/Id/I4wKoQ9X02Blq2r9OEKfOeQyn4ayPrSMn4jrFDcaM6roOjbxurrlIXcasYJec+42k/h0l82y4/jPk8Um+f8eRvI45ZMT8ju28bYsPdtn3eawvrpdrLq3HdZqiwyv46Ba4AMtiIWP7yOkHwsF4TkUnlHEbnn8htRupZ8m4D9eK4e/K7Md0TS3k8XoZC9c+rKbHGjU/HtYldwtknE/OKUveWmFbMTiuUUy6COoDwsm1b2GeoTeDzSez1NZnk9Uwayt2azXJ+JLMbzViu+DqEJgzea+OWITSo0Mysg1L5plm19fkKVGBXbKUpOKHEXT3hmezKK7ocOThysSCLQVMsSozLcRTzZ2kkskV2ZZURWT2QTnJkAUVkolpxi4Vdz0aw9HVTSopAaoUQBlXimWEiryEzDJYMWqpGsnsFF35lcyYI0Fbj7DU7wVVHP2VrsrxLJdGDFZQMzWKMXoGpMUcSSlblFl3aMNPPROnMVu+itxD7QV5Jg73oSoocbmiAS/sznUFaK5nnJhJNdQVhqwjG7xKe6ZHWTALY9FIRm9l9lEyemW64m0jt3Uy5ExeZvQwu0WlUc/+Ua72hLBcmenCLEPpbFVJhHAlIrNOfMmiqxxJRCdgoxVUDJQeQecRpphH9cBEKlJh9Xg2Erx7XWZA6ZFkj0fShFHyyCQid4uzD9OVWQyK2aqM4HCVEMxXZk8sX5mZJaxXV24wUw6VF5uv4RHKgbA6HklCBRCzG3gU29HZ0FxXyBeaNcroCc+GwwwCnIfqA2Y6gGlhxKJHkYC9sS0jGG1mUNuzf3jENT7ptlprVdeL2qqCGw8ieZYexavC2+MzItCvKtmOwyNdXOHHTCGXM8xkpsl5tcysymv9omfUUD2JZJQTGYWuhnHmxl441B6SR864zdx/zMRIqj41R/Tf0VFkwLOhwLDOk1lfUlHTFTyONqbMMiszTqGnqyra60om3h5EXvVSV28bPKKBTACYk1EVZDiuzCpkLzyuumgXV1JgJ+j/yJFnQCdtHkla9vU0pYod7VSx19nZNWea2ib5NamkB5w5cZVOZrC0ZfYWZCEjpcgUlZ6FJtmzZF9KKnsNqWxFgkWwVC9azhhQGoORZF5tmWExBJMexTwSIhUeZocej3i6unqgq4FJVhvMV5XEofg01lVLZpPScy3JPvxkhDzGiBV8C88cZbZa5tknLJMqj9ahSs82kBJ9FqnRZ5OX3fA8ViVjo7AcrifZMi7nYo4NRxC2eMwjQ84pteKQ4yvzWpZErGulkfBL5u82z1uo8tyDyL3l2qTU1GRurtQHh9fgDAsZu+Y5btKWJecbIJeYyRiUnjsktWsjmTfBuUnEuXWk+RPH92TOCTihyzUAssjhZSw5HHPfJIZTFCF+iHEizLFjpAsX5LxhiLEwtmrwMkRH5MPm3lLfZpf0PLQW8Q69HshsGZYAsyVDshSXWZ1kTRhJHEkFHyOFrbqeSVfn2Reofp0Ly7fF53HmrahXC3e9NzziUzP0KA3PBOH4iHtwD5kRP4YnrCe48end4VbbGCpXKjlOSam8aRhGobC550kxZxbU2t9+O6BY+dwj+3e9but0Z48dFZNfeN74veRW93py033WMs9nL28sbG6EZKX9gkGawA64s8aTv7rzefZj+Ln8YalsPwb9rWnk8g6WzBaK5ByKhc2lyY7t5PL5lPW2h9ueIRWk++y/fOiVyYaBH2Ozi7ZdMJVdypc2f0DLLpZyRpqo3m1pcrrkPuzK5MeBcDQLk9Obma1L/jjrkl8LtE+5LNlK+ynwbF1yti45W5ecrUvO1iVn65KzdcnZuuRsXXK2Ljlbl5ytS87WJWfrkrN1ydm65GxdcrYuOVuXnK1LztYlZ+uSs3XJ2brkbF1yti45W5ecrUvO1iVn65KzdcnZuuRsXXK2Ljlbl5ytS87WJWfrkrN1ydm65GxdcrYuOVuXnK1LztYlZ+uSs3XJ2brkbF3y51+X/EarbT7asmSzuMeCmn5v0G/qj7P+4IaEVlsdOunf9srT6WSOFVPj7mwW+JvLrnYXRCULn/K7C6TWfuK+O01OtOQpa5963dn1cmngg6srZ5P7qV6wlt79ku4serjvqj0nbdGePjbtj7th8Le/0Yo0pekn/JgEvOxOo2frZ2XNvLN5B+mPvmil+Z37GFu/JW3lt5Zpk2wH/XDnRgyhZa9fjqrSHutijx1U/UUQ/ta3x/sO3ucKjv5YXax9V42SD7ckOL4q5yQfO+vfrS7jTxvX/ehPA5I8Vhw+vo5atPeI/JMlekcCa0M5OWWuvJ65uduAY5Ryll3YXoL4XNAXNjHvFA6G+fRY4RxgRbMGZ/FRSKcsat1c9WzsrnperkMdTLu9oL+6tb5BcrgaTPu+Xi95O5nidm+xvt8qbHo6yynl7JTfGi9RwExZbmqqwvKnyd9+iWm288RHXWz9FsgsFnN5a23nCWfTq9iFnCqWVn+7mH33TSjSNpZ5zqL+x1HzxoDcsoDSjgFsbQQwuOr6o2l/Fr4Gnce5k8SzsWkrY+n2ku09khRlHYGpOcQhEVh8EIGPgOCP3uAIMKCvujfQwwMQ2/gyFTDbkH818PQOFP/0F3cU99CWB7CWBsvPjL8HfaNVyC/h+W/C0T6CXXmsokVhZLkrz9aePCVKD0prZYAUof3L+/PYn2t/Hvtj7M9jZ/vzfPD9eZ4BtE+5P4+d7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c+T7c/zitU2H21/nmLp6fU0R7+TypFsiqKVeSSboljb+5UYpc1b7LvviWVt3aj0vhuf2HtsfHL0EE02+8kl+/t01r55271+HkTwkQCzYJRyJXsDUmbxhRtRFUwjt7VI3CkeCp2pVm8YaWtq32hfHuMVG/AY9M/Eklq6XTe4fdrB7R/kivnN3XWMQnF31WgaggxLPYyWV8WxUpqT2FjZ/Iqly3Lgboq2XvfvX7Uq+VVQeGgx8csgslz5/sAeTYdATqGUs0qbt
[remainder of base64-encoded drawio payload omitted]
\ No newline at end of file
diff --git a/docs/static/drawio/repo-based-standby.xml b/docs/static/drawio/repo-based-standby.xml
deleted file mode 100644
index f1523908bc..0000000000
--- a/docs/static/drawio/repo-based-standby.xml
+++ /dev/null
@@ -1 +0,0 @@
-[base64-encoded, deflate-compressed drawio diagram omitted]
\ No newline at end of file
diff --git a/docs/static/drawio/streaming-standby-external-repo.xml b/docs/static/drawio/streaming-standby-external-repo.xml
deleted file mode 100644
index 1452fcc73a..0000000000
--- a/docs/static/drawio/streaming-standby-external-repo.xml
+++ /dev/null
@@ -1 +0,0 @@
-[base64-encoded, deflate-compressed drawio diagram omitted]
wn1V3fi1fWr4sddusrgvJFXHixtR0YeOlYajnSaiLb8JX1R8sYkvIPWN7vS8n5aqY6E/bmJXtVyfN75u0EQ7EGvwcdaT2eA6nHi+v+y4IpGkfrjNn2yU3zNvIaTf7ktT08tesq4cN+aAli1Soatdf7/27Bu+dtybTloO6/2WtT+c5TItDYTz7Zux2p58d07PTSIYbfJ4a8lmFOx/qo37wer0erZO2s5q9/uJ5VoPlJH62EkgFtVuN8jWN/c8KPhdcF54pg7oJlH3rcZrPVE9175vzttmdijg90A/mcmf4ulh5eeobwt8cFh+QqJt/lIPVtieVp9LEeP3DLFflRTxfyVgLCZOerEZ+m8fp2ZQjEbHoPTgLLWaibqtRkCezuWKIYKnZuI0N/RkMD+lj+pq/pkacOJdbMjSSXk+/NBtTbxSOL/rJGITTtphfwVYPRkchc2UblJSZW8DI7c67l651q6bJa++/rpt2H7Rzvo/94dE4BNdM+P7UGazt2SiY92LTvt/aEzhj1ux2pHF+FXLdl4QKNP1Ja68r7cHxas/XRVPSbuZ8uI8fbU1bhpP5pb0NpqCozK4ZdcsgN//uBNFf/Gd3rHZa2KBhS1+7rjDf9t87nyNLNV+5TtDLvyboNeU3CXr5j7rzzf98jv7/5t7ad7X2b682yV/9lv4x9dq/a5/t/416rfKXi+C9gv6uFfRPq9j+bsvkP61im4G4MQoxvxRz3PeUZWmo63nxjKubCiD7VSUdVv2JFFvX17CxaWxKreGW2is+xy9oPzmaduh9pZNse3K2vkAru720ESHRsOyIT8TPTeU+142PyxAarVv6K24sL87BUFbzWcM/d1REzVt64lYITlccy5URgnlwkmWvK0VT+RnN5dN4+uFGi9MpPuiF+PwN2/71utpy8bFzpwi4733D5T1pOy9O44NGrS8Xj/NqXmSjg5t7qa64VtTaTfl3l/p33tmbR/IyDb9/1+DfXWbl2kxSuNflvFsO55symk9uy7kmDYJHBxv29U+ZiPDa9XsmjY/9snc6rS9+svz6/8lsfT5J0PB6s/BO8ckTEa13ChRtFivhwekfS/E8CmqIn4ifE/i5gtawbuXA74uffg8/I7jcFWOwmvvi+Re39Xm2jw/yJT53j+I5PzfYoQOeyrqhJ3Fj8opNOV0rxStOpYMjRmYULI/R1EnWyvIcKzMJR4XbLP7Vp3TxLDf7nz713cIwW821U7T46H9dhaW2h8r3GP3YvtHJ4R6gbTOC7S1oEQwth0NoRAmtjAsPG07aCB2F9qBuEBGwObAJ8iC5AIEoqBWxTyCNNKJWyQCswbaMITfBx4blclRBa1mf4aEIfiwReAYN2S1bGSEQLZcRZFbmNaBMxmbb0LYWYZy5uDadIV4IruVm8dB80yVAUQBtfgEmE3LTdDtnIJ3CzcYJsIjNPaEl7FFmCMAD4SXUelgahW6JkCuTnxOCXHWZoOWuhJALAKghMBva7AKcRm/MAVgWOAh5pcbwDMNByJUNz5iheACw0QlaWdYN7X2CeRFMqaT2twzNOdTtkRmAd8gJPkmgDWjajs2/R9hK9oiNSt05Qh0Y4jJzAJAEMCMCmuJzqKEJBHQypYqBjgBq0QDYgGADaPFNcGxthNBthEwDtPLHewXoQAPbLkN71znBb0cEzCRgIkFVNAQIU9N7gNY0CMJqc3tdaMPraAR/BIiNy3BlADFwq2Rs/wstewn2AGA3D+4DgBOVjq2isfVsZRPwu8wZOoIQyIqbvTe47bJGoBwCDLsIAaoB2tD+GNpCU8NYGhOELyv0XTGDJ1xqAD+tIehhSe1OAWxFgCeGSMsEwyEQAoOhGcDJUFuAZiPII2ZYHALHqD03Al+gPS80n53suQV3RUAAbn6PDWQZ4qa4NUC0+oZSO98Q7wAhTtDOVSHwRA2vh0azEUNLbAZTRQ9Y2271wW3Hk+zrtW5fTO2Xse34d0tnR6XG+f6DW4CrDHogkDJBsx7cLrzBTXBzgnpza3Rq3UzwzcplYCa1WEcbVLdYJ4gQXxsCsQqGZhHgMyAgLI6n5ePcoDbJR25Ji23EZc92C4I5iHkE7egDAH4AGAHmH7ZerhggqIp5jjAvBLghBKF+hhGBAwNqSx8BkAnm8w+QAoIHAJDzI0UwyaEGrfq0JuvmwggJ2BsMhsq+XmvwWQBwAjf3q19bgAPcCltIA/gzQOAHPWMLYDWz1TqAtt5gR2MlEu8ZoT1zqV08AAVMBICDrec1CGOB6xpBlgSvQnAuz30Ad/viOSwthJJaDthfGVvCA7gRW/DbJTdHlhG6BvCZoFtwm19szIwwmoBgNAQFiag5MjRuBpBOeiSoG7ReJ+hBSf4FwDMOtZ0meE1BwB2Gh8J1H10VYaMEXK9B1HAP2LAZ275DK/vKRfAuthPGuQTzABpKuwR8gjmFbYsBAgVwXfcLuIMgrwOuKwlsHDSXdhk0R23IHXy+2A4ZfAQBFxH4ikAKBCERtAQxCQqC4RoMZ6sYCoTwP7TdU0mq27IT5IhBoIQhUHANpAh8kgmGLH5v0vNDKAfBXhoMkELo+DdIHqAmKkFAatuB9tMliFWZMxRKIijUIWcolMRQqJyhUNI3RBdh7jgeCkN2JbQT1l7Yl0hi+G8NsKpxEjLDogHFUMVBWCLGAG0Fw68Chl9h+/SI25fnJQOfJGqvnsjUdh4hb2yLCCIJkDP4G9R0GwAvYW27AXIDaxnBoy5hABg6GbOPEteEsJra1tq4dtnONxC0hYCRiCEmCeMLACbqMEBJJ7AMA0kRqhLEBK4n2LPKPqgGQMoEgAbQmUswka9x89UatIjALHwfQikrugYEJ2UMV3sQXA3RCTKjKxCKhK3jpwi9AzxEif6boNIMDEoIpgJgTfKNDDkMAe4GDc9rQDzEG9WwRkgcEGQFY9+IEcLnEKy6hixZCKFLa2yDi3NGZxAjgh8ZZopwOfDjCNNzSVcA6JtgeBbD8BBOFYKOgwbqFcY+hOGA9xBYD7XAN/SOUCAutTRH0FNOfoubsYNOdmkNoM4l+3NEGAzBQRMErwBYE2wwAuAAMA2gzVSvMRTyFygG/bnNsPYQ/RasRQ81HbZMr9v5lwxU0xjyquGcJDAd2LJSaNiKQDJwPQDhdTQALwG0zyPIZCl+h0BwAMSNLLC1IULKCAhFIFtaHwj8UllTE6gxACBPyLBkgiQTNI4a2SMIFjEeCAFEwDbaRGikT4BMmicIYgR9E+FcJIgtYgUy1GSHHHAENUz8WyshXqOLbd8ZqqgS2Ml/4P33wObpDGG0M0Z1FKynCUiF2jMkDAViJ9SC5rNDQL9DrZfxGiWCFqGfI5AT4Q1UggAAFA41pEIAVZcgWGDXCAglAwT8t8BRQs7gfUWwFgg6iuAA+n8C5BLIlcClDDiuX2twqcpjJZ7jPuX1qDFItUL7i2A0CbQCgQUxJnIIgGRFDLRl6BABsxUGnmpRcGRMR/3K6BDUOaHCtoWhuQmDl1BXMTgyoudFcD+JsRYN0rEQi4UYL3lkw5QawIaxKL4nREDDkECrDUbT1IA2mQ
BPkUJIAdJ7MdzDlPXQAe0Cg3VzBusiTAvgqPVrDYAuCZnhSxH6UzEeFM80EAtCoGeFMCcJAsAojoR4Mc4YH9MgfYnQYJVArgSorIHpBBtGkKYWlxyfIoDKrvU76hDCaxDYjrQWamCF7D+C6wguB3oPnr/FADay1QrOASvC9Q7/xsgZjj8ihnfi2gcfwaBh50kgCtClLs4/9JMLl+JaihcJzwPrSgHdkBPCBn0crln4OxBrZgyPJlsB95UirFZBHx8ggDojADX8zZg/ayNIEv0wwM6CmOLVEnVGwwWEpYlaWcSjR4zhMR5LdYJ4WLHEelwbzd2CgaoSQVtdhpIi1gnwU0+0zyXG1xVrkwr1Xa1dpjnFs+SnZNLycUWgEQBtARrKeTAQumJfqnh/BRrHeAbsFIPGcRxhreOzaZDNQphaReiWWAWcCsI88LpBrx9zhtDKZId/A1I8S+iDhIYWcQ1jV8xcPM8Nza0qftTYLEBwMHAWYYpk+xKEk/G1KgwGBG1dki2n+cz5FpXsH9iDuI63HwwryxAqjMBZgrAjnJAglA1EF5n4WqF/UwC2Ju6y1vAl6TiCXmIsSFA6tHER6QgCplHcnJJNAz3H9kz+ghlP67grQXA4zQuHfRoDcNm3kWYnCPlobjN0zq21CMVioN+nFOMDdFHEXg/KV4Bf82swLtpUgJlS/gaeCa4f0LcFAV4J3D0ScRE+HwKjq6z/FQa5o85FaDvoAoTxse4PHAJGHt3CQ2A0rAfhlwk1VufXRIxzJHvxM/xvdvNErJZTnBbKUeCqOBdBW9X4qDruB8Sb+Bl0nic0B9rqKYEJPYj3KtAeCJkBe/KNopr9Bp00/QWd9Ms8jiCOtBDgCTg4jTReQsBSjI8R1J2xXyQ4q5Vw7Kyj9uT4HZ/t6K/WJdpuneL4dMYIKf5bqNldzkPq4KMrhjgqHvkFlSDeACx0CO6L+C+Xc0gEJkftDQi6AwKcYZ1T7IMAZwfBkRQXR5QrAbuCGjPEHAEiteBa0PbErJ3A5zK2jrRhDdNt8LypGP9Wgz4r9CeWS/dFcaZC+S/E7KlsByB2f7ANBz2M8RXFtCGDwUPSfeALGgQE9b5gwiFhqxCm6yMcGfyth3EV6ZvRlAHDCEy06XtA/9A949qj644JH4QxEsVuFIshIBJBp2R/JdIBhxrw6iCyjzQ+wTdBMxMEEnJtPNfQJ0vokxkmD/bvwfm4nJBrMWt8He0S2/uc8l3Og2OtnHK9xp6guHb29VrHtalP+aM+wV1R35ecd6W4nHUKxLYO+2W3jv8UBkkVaLcPUo03zMkOkR0h3Sh9Y7sQoBuivUTQJOaJY4xTPMqTE/IrQJ2lkUaEGNOn2gD4356tca66fv3CPmKegYHywudKPJYQy2s1aBr9D8EzQR/W64/zhpADQcgW3FcGNoVs24dBscEx+3qtbS/lzhCQymOmEZA+pFwR2VqNfCvktkhH/1aPY14kzFCPkxbnuJNgvqyHEYxLayuUOJfJ6wnjG/IjAJc3GXUJMQLGxjaBQg+UHwbEHeVZMc9Tke0IEQJLANxE4lwmoAoJWI7xI+Y4COKM3w9zDnWDxkhFNUbgqk2wMowzYoSPRhWu04LtYFWPF/sYBLNCrIa+GnOAdUwD6xxzxxzD2egXaZ47BMINcP1VnMPQKM/BtQwT84aQa9AYcKZy/KFwrK9QfA2xGfpYiXL2UGMRz8ACPwB/C2o9ADFlwCnawIQB6xg7ZgxWpTVHODIN8ZAYN+NczNyKxwhz/znEReBHJcyLgd5E/eASxpRgxiVi9YSNdMnGFqy9yP4eapyow7EnwaZxPZJ/qGhtkc6iHKqkkI0j2D38e70WWK9WrMcoT4G2/0gozJLy2CMCr1POI+CcB9WYSqy34JrAWtQXrpRyciFpcpoDlHtBdCrrH86FjKi2wXGc0IykMxmGbtNzxhgirnUO/F2N4mXM0bDWPhL2E2uHUIPyxfoOyaZjPQz1j0x6OBHjTs+E60k53QdCkJ/oo+mV8l2WX+NDC4oFwWaFpA0OaKPUr89C3bKkPDOuJ9TSEddhUIsqnAOp6HM2xvMQh3CehQDu1pHywaBZG25OuShGJQKYOYX79QgSncY8P23KnR4IkeriOEC8DHVMv2AgvLguWFc2PXNa6yrpw0lB9SD0BVwX4hyoxTnQ4Ct/WkB+x6u+agYPD30A5oMlzEvi2ia7jHnTL58Nfi+meUb1K8715gXnnDjXmxc1CpVyvTn7HqgZYf5R4XwV1+LgejGHTHMLfVUoUd7bp9iJNAuuExxT9DES+k6PwPIFPnt8RlhTy4cIeES/iboBc5dlzrlxiXPjWBOoYeUl6lL8DshbQXzswfrJhhQLk11HzWLTM8J5DHPeaZD9cUhjBlBTwZ9zRrmq6DdQo9lUt8C/A/WIkO0w5pRUHm9CxQqd4aF9xDxrybl+ieYp5uUhb1qQToTxotoi5fJxbqtUN3O+6hEevk+sM4yPMLcjuRfCKdL6yanmYlHNxcVxwzyBzDqkYKRowT5Wo+eB0G8F61iYv3YxPieNg/kBGCfAPytQT3YRFg9xK+OYac4ivtZjf/FL3PpnfGqK8xH1C9hQ2GPgcx3Wpvo95tApT/Jdb7Gp/ocxlESQTai9Uu0/q3PdnEdpUL4XxoryT5xfkr5y0+A3Sv485oXRj8qc7+C9D+CT4NmTZhihT3coRwOxEeCLERzqkz4rCas5hH0GqFewplLy9VM8F5JG9CjvJ3nfmuvHZwS1eBXsvvC7WEuHWBRjbHGvI5Nra+gfbaq/Uj6jqIGpHBeUpLkwzi9JA7plTLm9kvJRCeoYyFEBgJXXVEF5Ys4/kc9s1HVv1u4NuA7ELUMMhveNuSje0+HjOnDJ3hdUw3Uwv+aZ9X4Jh+p66Z5zvwhBlQiC6pRRWccmOEdpTuH44b4U8ZzEbKGYDrUH5pDAroGvwXwY+Fqo5f8Emj3dHpTvg/qQzvs8XNI9UCcvc8qjEe6c8KSMvf7KG1ucC0spBuHYUaJaFeCXHdR+LgLlc97LshfX69RrvOK8M8bZLvgZqn1hXu8nROvKORjputetYD9UpBTgmw7DSm3Hva60Mo2jiL+9X3IAc/BFsG/Hpdof5lISyrWg/8R8uUT6JOcxsBndLOYJxs+o7xTUiVQXz9DGVd2UcumkM3E8KCen0npDfaySPhG2EesCkIeyuXYGOe2ZAX5qhDUcX/uxtj5CPLWDc5J8IsZAGWmCUOX6FGLMKfY7Uv67/MaQu8Eyxfokfn+kUVyHGrJEfYfrPFTJBrsZ460fnC9nLc11nSmN4RpsdABaAJ9FQbV9iOkT0mCEnH9Q3Qb3X1DuhvJZVPfBXDHYOx3tGOfUHlivp1o42zCba42sj1OHYrcD1fnoPTrFreYXPBjGWfoNMrf8LQwYdm1Vrm1TxFdHft8V2wKzQ6CchfWKaEVIXB2iyjasZNxptt8LSwkZ2C+VUmf8oILunnE3i4QZHfQ8mJkocKWAUrRopwVZqlyrM5mYkQ0oK0K7D/Q6QqZZQZEYK3bKuHM1hmDzdSblAFkKmGWYK
aYKlbAStMvgW1FT1oh2p3DmlyJj9AQhV1icJ80q9P4SZ+Vwl4tfgSqwFZ7FUD2TEW6MFdxcoV13cA0T3onjZ1+v9NwfbAVxJw7eA8PGXcxoHBls7tc7ayBqVupsqEsKmT0bWJUw4yoL7MIo/Lp6S7uP6uqV4pK1Ld3ASDGSpx09qG4h08i7fySXLSGsXNrpgipD4mhVIg/hkkfGMYkpirbQk9CYgBo1IWMgcQUdK0wVVvVAiZiUYfVwNxJYd4d2QHEl2cNKGilKrEyC5w4w+lBc2sUgoVqlCg5mCUH50u6Jr1dUZrXq5cwN7JSDzIuKn8EKZUKqjuDroIglrmJrHA3lnCEvWDVS9QR3w8EOAngfZB9gpwMoLahYbIQnQGusUgUjRAX18+4frLhWxioIfsi6zu3vDG6VlPS3uIpngbWHn8EDzSyxdjSsdGGGH3YKuRhh1jtNRgBxhygqmIm/YUP2pKQqJ0QUnA3DyA2t8IMtJFbO8Jrx/mEnRp31sTUa/4i9SIK7oUBhjepdX5RR4wweehuFdpnpOE9hnNYWXK9LkXiYlJ615OytjxUNiAQIJC5RFRKfJVhKsMIny4XrwkwKrBO4/6NGfwPGJMRK0te9dn+TxS5/yWL/qM72GGnymsTXOpN+wMgJs3S0gyWk3VvwLKhSCpGixLvQKHqm6EuizJ5Pma2S5iKoVK/82jEg8RwsKfIKaYdFCkoasfcqRYEuAdmx4uly9oCzgXVUe8i/M4kp2TQcq4B2k4q/26DoI64r5BVUrMC24M5RVKs67j7BZ2JhtQ6y9LgG/mMU/Z912Rn3sUpUG4WVg/kklepyLuyxQQ+CKx72kUHMSbniB/pX1LX4JCrOlZakL1G/q7hvwcK9B6V7wdwk5dRoby7lB9M9aIaCate4x42u5UvzJRBLZFSD4r1DlLuW630TGJuUGFuXrJ/Qv9d7TkATupgDECsyXVYUw6H2rX248CJCH0KdCPbY4UwnLYhxQwq1MFzVoMvAO0I8rPzHT/1ndSn+Hlwt+Du464R2y+ATQLUkU5TioqqjqAkqiUfK4EOlMHB4J52Duy8g+zUila+SzcPIWxJ3Vbg/3g1WfGyZqzS4EwT9I3wH3iEq4v/ffILzBOcYO2D87xyYabalP5rtPx2ZaSitP9Tmr8co2380G785OKP80flfOjqj/tuezX9bU+bb/XBewS2+mzL/HY3w/wkUB/XfNmX+22Zn9lhdNuv37Pwvzc6fm92rfzemQf1dl+X3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP3mcP/8DSX+g8+c9j4d6e6fouYk6SWsm79GTH3T4Q83q7ZI7lvs39db9v76nG9v7l0/6UDjH8+ItaooaQ/THa58V89I6a+J/p7ov/PnydX/8aJXvRb881o5eW7uz4dRsZwMzr/S/plOLebZDvlH6/3x/6aXC+rk/39r8b9+rxs4PAiDtn3e4ZXOPSIw5RuH4+Snz+M4J/nx7Y4PBY//H8EX/VHS+MfrYK/Gn8o6x8u4n4XP/7w48fg5+/P4U/1BzerbI9XK3+NLNzjD+P1nw91JlYnL1hFiZNlcx0Mn9tdXrY/5tP13fwXe97H6p5s+fsS/1+J3Ti9tIM2MayD0Tuk0r/U38+T+/a0ehxef7643w06f3QM6+V7fmnqn0+Ct9Sf5g1dP3/qe+r88kXNRvMPWf4Jt9vW/pDamtqp//vzV9Mt//LVOCu/7vB3E1X8eL+Cefp++31127vXzRbe8f8A \ No newline at end of file diff --git a/docs/static/drawio/streaming-standby.xml b/docs/static/drawio/streaming-standby.xml deleted file mode 100644 index f976f8458d..0000000000 --- a/docs/static/drawio/streaming-standby.xml +++ /dev/null @@ -1 +0,0 @@ 
-7L1Z26JI8jf8afpw+mIVPWRVFFAURDwDRBZFVFCWT/9mZGJVdXXNTM/8e5vnxbruQrYkM9ZfRGbIT6xctPNncE/N8hRff2KoU/sTq/zEMDRNMWgDR7rhCMXw5EjyzE7Dsa8Hdlkffy4cjr6yU1z94sK6LK91dv/lwai83eKo/sWx4Pksm19edi6vv3zqPUjiXx3YRcH110e97FSnn4FNZl9PLOIsSYdHTxmBnCiCz8XDSKo0OJXNN4dY9SdWfpZlTb4VrRxfgXofunh6512Ny2S+tKtH4Eorx9r/gzSm/Se3fBnCM77V/3XTTJQcJ6FjvOJz002X3i58yv9gSdPv4Poa6DWMte4+BHyWr9sphkbon1ipSbM63t2DCM42SGbQsbQursPpIKzK66uOxWc0iAI++nWPQ7tV/SwvX7jBwJEPaSm0c86u193wfNQa9B8fk8tr+cR9Ys80+gc3Js/glCGqfM7dylv85QnfHfyNVByo/Y6fddx+I0MDVedxWcT1s0OXDGeFQT4GDWFmw37zVdy4z7H0G0ljP4IVDCKefGn6KxvRl4GT/wFXuf9NrlKUwITC34SrqDe/YOsXbn3D1ukPuDr9o5gq/A8ydeDRb+LntwLwRzGVo/89U2nmz+Tq7H+Qq387VWV/yVWO/TVXGeoHXJ38UVyl6ZGt/2e2zvi/G1eZkau/u7LyfyZbE/sficpe33zGbyUlk+ZZTg2w+X+Oq38rDDxh/koQ/EO2/i8q698NL034vxAv/ZCr/6MB69/KBE+nfyFe+iFXx4D1d9BV7rfgJf5P5Co/cvV34OrkbwaXZr9iYnxK4g9Ny2edlkl5C67q16PSVzYDD75eY5TlfWBjHtd1NzAyeNXlL1mPqPrsDsP9eMeHnZ/5z67SfntS6Ya9U1ClX8QrbrP68M33b5pAe19bgJ1PA/8ZX6vy9YwGqvyrcL8Onklc/3uDCJT9l1LyjK9Bnb2/vejHLB9u3ZQZGscX6eJn058nE+rLh/4liGOon6mJ8MtGyRCHdr5NN3/XNE3xs58p4WvbzC/aphnU9Ozrh5388jGEQr96DJbYL+P/74X4E6D/uVL8Q2mkfhb4bwSS/gvE8Y8Ws++jA4Gm/juh4qb/JHj8s8Tm1x7srxMb5reLzVfzOdz1xYL+G/v538vbf2DWfoM95P8YuZx9B4Rn/6VcTr7LVfHfR7V/tFxO/nq5/CpiHDf7pZdmOe7fyBne28TPDFEifv5XwvdvZYj6c4zdlPsn0dV/KlTT76zmlPqThUr4GwnVfwb7fmfJYf8kyfku28J9j+F/q+QI301zCfyfLDnTv15yPm7yP8D6X6SN/iOl7b+JEf7lJNMfHiR8NxMufO/cfqtYfj+l/iv5/qPF8i8JXf8JeqPYb+Eb9dsE8xtZ9L+V03/mVVEj/zen+rsJ658U0LLfmVDhvzWh3zf0ZzvfT67sXyXPqjS4w9eswGvgJMgqZRESzSCMr5uyyuqsvKHzYVnXZfHNBeI1S+BEDSIs4dvF6k5W34GMBJ+dc9aC6ErDE5RTUAc/sSLZZbTqnfzESC0SdUbeLCzm2Elc6LWvqL9z4fz6CnoqCxZbKlLKt8Ge2FPHs2bHv6MiepvOhV/vZuS6Tk/iOV2FN3OmFyl1WogTo5uhO6LXqTdfIbu8Gb3emIr4jtjjTc8kJvD2rF3MuM1Ob3RFTMz+whn5hdEVk17vOMbM9OQ41yh/R798j75udkvTP1yvUSa26P77UaEyZ67xx8PybO50NI7NfHs93kz4trCo2Guvm4zPw4VZm4e6CLy2WmdmY+UiYyq+cN4N526fc1ZheT59zN2v59jh3G3fhXKSw1iPntYZ3qnzve396PHUyqlnG3nWnxbX6uhw0881W3aZHufXa3izk+OX79t9WFypCNH1dLCu0dVqfM+6Ogy/jxg30xeXDtGjtRz/ZeYJ2k9gv7eU6GX2OpxvvzsP+5Qlc62JeBB4NqJ/ew+LfRpl9C0qtAui8+sk6wNVQlZMInb7jmQ6D5n2HeVUpiPOrJ3jxd/pScgci4jZU5gr2fRf3iUiWp7S7+5qh7vkWRV4/NU/LBdfeqFwU4P5yiMpPc2TBPPR0RsYw3rXoLGKL1M55YYT0UbudsaOo80d1VqK+loram0pFxh7bTp+h863hqOi+zjKoszWylAbctOavf0yHbWyckQnx68sxX9ZfVSZjksZucoaio7a1mm/9xkjtzvUBtrqlKGondmh52VNh74za5ljUJ/QPtebHd52Ftqi452FjqM+s+ha1DcRtY36netoi+UYtamj55joOVFvOH6P2kdtugzeOmpjdRxjZQ2Sc66xMgpdZ8O1+By6n0bPR3RwYZwvSzErE41x7ZqdiWTBkgc6OUll5iLcx6NnUaaM2uoadA2M/1IhOqC2RNa7UO3aQX3L0b3OFtHWRlu3MmWKR/QAGsNzUR8RPRSxNoHmHe4Xohe6VkH07jB/EM0TdA1qq+M41H+K0C2p0HgotN+iPjAWvs5u4TrEkw7xpDEQ3yxFr00P9Q/RE9EH7e91M7cZv6PgPsrsMB1qM1dhzDVqs7NkqkfXozYpRFeRt5G0Ap9NJCvoHvQMFdkh/IwOHafR+L8daxDuKNaCPqHzpod0GtEMXY/0Bbfb4eMZx6PnYD1CY+gRf1nUP0Q7lUNjQDzzabSPaBWhsbqskZsd2RcRLS4N5htco6DjuYllwXJsJH9oHD2icw8yfanWwLseyeUOyxGSD+C1D9sey4yCZBPaQmNCsk1kIDeBHxzQG/qM6AP9qBE9UFtJTXiC+ohlGNpCz8gRnXMTbbHcwNg6rEuK+Vo7iK67Bl2PrlPUCo0dyTWcvwC/oL81ogPiD/RdR9dyiH5IfmTgTcQZjkl4i58HdEDfcxOOo75tUxN4hmiI+wR6AbyHtkBnoS3GrNeg6zKMxe+JvOst2UI7SE7w85AP2DUtpoOjomcniBegC2ptIhpiGit+Dbpt9svUdGyiD58tljlMawqND9H1lKNxUeS5Ogd0QteCjCEdbEBnoS8d0SWQTXyuQXIF/OaH/qExqUjmLxTZtzFNrd6E56HrbAbpUoNt0A7rC+qD+1rjZ0HfED8zCmSWxm05LpITFXSJwfxUbCwbyN71uD9KAnYMeEBbqontmgVy5IAtM9Hz/JbIn8iDDFlYRhoOyTmSGxXoTIGt+EpDLGug6zx89x0kyyDPHz7BudxFPEjQmJY5kg2k4yAbaFyKTXRyh2wh6L0DdjiVTOAXkvUv24HuyG6BTWrsXqWQvnGIJqBvPPAU+TJEE0SPHj3HAX1RCY0VG/FhH4SOj/gDdjRifHTNGtszuM5Fz7xUWGd32NYPOgi8wHoNdrnF9AQbKX9kH9FKsREdjgrqJ+qfDvYX4Qxku3toE+yoSvwJ4g2iG/SzMx0N+xdsk3YU2AkO7Jbp7JENRbYh93miJ0i2+wj6BvTpQFfA7iF97Yh/scF/Ix1wMS+JXPsstoFILrA9uJgc6LKZ+yDbqK8J6COMocV+CWwb8ksmwlOmcs1N4BGWJZADFfQP2xoLZArpMBozsl
V6i3VeQW1C/3q/BluKnk+BjbNQ/0wYawb9sbFNBvoi+vXYR3QNg8eecez6gHQd+NAjGe4vYNc5E3QZdBrZXJALkBXEFwbbYrADO4qCfYvY5sF+ob7usG9hsA7kPrRBo/t5LPcyoZ8BdABa7kB3dZ6MwwQ/xIIvhvvQM0Duv9oObD+RjIJNgz6B3wDfkuEx9jCuNdiWHdYLGvtLhC2+6DG6F+TIgj532IZR2E4oKbIvPkXsv8qA37ewnGO9AV/JIt7wQMPIcYHnFLEVFwr33THRM9TW78EO+mAHsd+BMaC+URaWh4TGNJfB5/qDLVLhWugz4BLQXUSDC5I192O7Ea3BhyOMA/5zh8cGWKUjWEInfepgjB9bq2LdHew8CzxEY6WIjwPbBv49Ij4/B5n1CbbpL9BXkFHUlsiB/zABQ2CsDr4Z+yBsrzB+yIDXCPsoYIv0b/iGdByNE/ESZAhkBmSiJT7QJzQgOApsJshGg+VLBvmnwK5COyBniG5Uh20e0hvsv5GdgWuJPiDbmeP+0YNvxP4NHUPjQs/HemzjvgLNQH+w7mWwj30EG/U6wXQ7jHUGO4Ku6485ekb/8c+AsYjPuIA8kHGDHSV+HMYOPqbH9rgDuUe8wDgC7LJJ6JgD7kLX7LB+ovtNuAbwGU2wALQ/0HsHdDRpIrPIZ+C+Ib+FfaSJcbJJdADjXGJ/LuAzsP8nuBF8PfreA50ijJeQbalRfERscgdYCuFupMcm9uegZ+DjXey3QBctjOkuoG89yB6Wa7LlwfYRe499OeIhBbasQxi2Bx9t4f6IMC4e3c8iHeCwDQFb0V/AZyL8BHICttYFGsFzOiwDMthh0A/QLbDD+oDhVZBxxA+E87/yF7UN/MQ+HHAYPkZsAvK5YO/AJnbUi/jKhMhJB/oM+MbHsohsaI39D9hSwGRZg+wj6KoLNuErVgI/nms9yJWFsYYIGBLR3a7x+Odg85C8YF+mVsQWUe2Ap1mMwzD2BDqDz8Uy2RJ51isSc3zwMu4jYJGKYDiwgS7xM4hG2E/nJvCtxnYY/AUeE4wjwroAftCaq+168APDltg2LGfYZ7E+6AK2IzBed/geYRxlOZcax6CMCn6gwX6VbAcbie0Dpv9aSfNBH3mC+0CmbIhBkJ2mACtgW2/imAhiJuAZ0H6QS0QLkDHA9ES+E953LiCf6L7PFtvlluAclxlsC+9jf5zwmF8EV5FngS+RsR1hhziwGmx6T2IxF8dLFrFhzIA5XjgWxde4vQE2BOE3kC3gKbbVjk5sIcboKLYCf98TvBfBGHYDHsqwXeiJv0PyDzruqaAvPdiCYTvwhALfwYGP8bE/Rfwg8QzgAKA1icGAhyAzEAvjOBLixQjbNwv7VMCX4A+QncoT1F8cK7SAsZFs4H4jW8qB3kTdEJ+CXjjqB79jHILxj4JtfkuwFsbADLH/EO9GLYn1kM0B+oPsQGxKbDWDZQA9E/QdjmF/TrA4ohXonk/kOcM4qMbYG/qx++BSE8sf9pMHk8S1JF7EMRfWKwZwA8hKxBAfh3UWngOxJsHlZMwQ6xB/lWHc0mD7DH3fEbw0YIDBx/jED+dAl4jEqx3GGSyKxhlTxlgZxaMXHMPjeCwXWxLjRdSAx/m1Z4Lfbonvt4EXHJYxwHKAd8HXgn3ucHzdD9ikx/jug112DYlniZ+iCZaPMB2RDKDnuoBBa2zXkcwOvpQB3bcGvz5sP7iGJXYK2kE2GPMRdB3ThiU2C+QogWdA3MohrM8DlsEYFMfBEJvAeFSa2GFEC+A/tv026BrjFRT2QQhDo7hGxbgAPRvR80Rkq49qnKMAfItsKPaLYL96lWBckHHHr4e+Mliedzh+74gtJ/I85Fs4Yv/AHkSfeBvbdYyRQR46wCQN+D3g9wtjcjRe0FNse4HOoLcMkicFjfKD4TuC4zB+ILEgTdoHG+cTHJFhHpO4OSc2DfDcYM9owKb43O4TdwEmVAdMqA8+7UhyJ4NvI5gdY4xu7akkPwDyQLAIicUAv+9IjO/34FOTmuQrwK9hfI/jJgvnu6gXyd8ATbD+AL5Ftgp8CWA3sGsaoQ/0OyOYC2wWseURxrkm4EfABci2ETwF+oLlrzcvZgsyvQZ7Dn4Z67T6ya+hGOdC7IVjf3Ab4KJmvb9bKFZrSJzm0r5jclgWAVvh3IX/Ne7PfbwPOM9CmAPbauAPxrQQewL28AEXgz3hkF6BnaatfZnpc6vyD1a/2S1JHninvzd52/iHbanP7ZkO+bTv5NiHOBLhM5xj2IFf9kn8jvOOEB+DDYyqwS9SxHcmQ+wsYuw5xO+Ytut/ppfYdoskjs9xHNp8eRbG7OaQhxTBR2NZJjEYwU2WjPEx5LNa7AsBM+Kcjo79E4otaoy9UewB2HyNc8sUiX0gX4BwMfZ1OC72Sa4E7ArGmC7OEQDOxPEDtj3RgJ3A5+I4hR6wIfJDDWmPyE2PbQW2ARhz99ifKCYZF4kzGZL/SsB3cYMdgNi9Hmw44GEcX5GY1oUY6IV5pWDd5izWxNiY6Cv2URgPmP2V5EV7jLHg2fwH36yJnDM4bwbPgnYA/5AxY90j/Y4wRiXxPYndSCym0uS8S3A14M6e6CU5r4NcDBjfrIh/RXJy+OTaBlnDPpnCPplgGbAJoMv2EAMjHKJEA8YXsV0a7H1D8l16PcRaDcn1SinGB9CXz/YT1yI64nhlgTEdji3BDgz2D+LyAadAbKsPftn8xH84B0B8hvnC4yV2piF2iNgRghsxjuRJ/gLG4mJ7iehTkzxxhOMUi+TJW5zrcjDO4glGhBjTJnMD4H/nKj/kqj/bjz9rcZ4hw+NgkM+lBl5CLM8TGpK4FMcnOc51ffRvyBtCDgRkFePWCmwKsW1LicQGl+rL9mN7Se6MxjaC8AzrDMiNNeA9LIPYt0Jui+DoH+JxnBdxK4zHCRYf4s4Iy+CAh8H/dUS3XGrIZQ76hOMb4kdQzEdiBIrECDg2VmuMqzOSH0Z9H/KsOM/TE9sBMua/sPxj3KiSWBTbcxgvyB3OcWAMTNoHmcO4gUdtgC/jot4kOrDD2Bb4CDkd5DuwnraDHew//Bp8TINzTB2Fc20kB/iJaUDPce54iOFU7BeJnOsYB2NsCDkgksPgSZ5jmMuQcd4Qcg1gX1hsX0j8wQyxPkPia4jNsI+lSM4e5lgQDRTwA/AsmOuJWIw7sG0H20Jyv9g/YVwEtjoiOofw3RrnKRsSD+Dno/v6gUc4999AXAR+lMJ5McCbGD+YELNS2J7JOBcC9oYxiY1tB+xF7G+G52doYm9dnBsBO4f1kfiHnugWwVkkh0oxxMb59ZDfrT+6MODVfsBjJE+Bbf8F42WcN9hh/9EQuwn5pyHnQeaYOjzfgnUCz0URH7Aj8R3gD5PIdU+wkMliLN4P+GfIhazJ3MYQxyHMSHAmR+JeldAZxxDRB+fAc3kSL+MczYC1L9g/k7lDmIOykX67xKbj+TCMf2iChxPEd0KTYT6pIePQcVyIfTTZknyXMtiVHbafBJdj/IDjMbBR3Jd7Yd6yI3lmrE8YS/vDPAzGosyQA
+nJfSqO5yEOGfIsNMn9Xkg+GDArazYkF5UQ2vUwPwbjtXKS94oG+VRJ7hTyzB3J6ZB4GeYxYZ4noklcC3qlEpoTXecIPty2ZD4I+4JhXmjIgSpDDtT5kj9tIb9j9V/mDGoL+wCcD6ZwXhLrNrHLOG/6xWeD34uInJH5qyHX27RDzmnI9TZDrpcacr3N4HtgzgjnH5khXzXMxUF/cQ6ZyBb2VWBDQQdsEjsRzIL1BPMU+xgK+06czwS9A9pjGuE5tcaAXE2G/SbGDTh3ieRpwGVDbhzPCZC8HY7hfMBbNclbQXxsgf5UBomFiV3HmEUlNMJyDDKvs8T+6ARjOjCngvcbMi8N17lYl8hcGs7j1mQ+wh3sMM4pcQO/a5hzQFgCz4dBzgvPV5BcP0XkFOflIW/aEpwI/CJziySXj2WbI/Nm+pf5CAtfh/QMx0c4t0OZN7X94jtI7MWRvMM+NzHfcJ6AHnBIS+bKYPzYx/KEHpBzQ/Ya5khw/trE8TnBODg/AHyiwMbBfDLkS0BWLLAXEBsQmQWbT1mDv/hV3HpB40fyt8b6sMyxPGL8AjYU1hjYwzysSubvcQ6d5Em+zreoZP4Px1CQNyQ2eZj7rz657iGPwpJ8L/CK5J+G/BL1JTcNfqMb7sd5YexH6SHfMax9AJ8EtCeYYY19uk5yNBAb7SA+1Un/AZ91gP2wv6QNjFfwnEo39J/Ecy7BiBbJ+8H2g7m+pRHMxXNg95HfxXPpEIviGBuNdS0Pc2vYP6pk/pXkM4Zcy5e4oCOYC8f5HcGAZheR3F5H8lEJxjGQo7IK86NTLckTD/kn4jPZz7z3gN1Z6Af4TsCFBCfgXNSwpsPGemASe9+SOVwd59cs+bNeQifzenk65H4hN4P1H+cz/e4Tm2AZJTKF+YfXpSA6IWkhMR3GHjiHBHYNfA3Oh4Gvhbl8FdsNfA3w5HqvSb4P5ofEYZ2HSXAPzJN3DcmjdeC/oS8ijuPWGBMOeWNlyIXlJAYZYkeKzFVdOqIToKuIVnIzrGVJUX/1j473Q94Zx9km+Bky94XzemuZyBKeQ0TyoGdSHs61HtZD+UwLvikzem4azTUqkKULir+tX+UAPPBFsG7HJHN/OJeSkFwL9p84X04RfNIMPAAdw+PsIhw/Y3zHYJxI5sUrbON6LSe5dIIzMT9ITo4j+obxMUfwCbKNeF4A8lDqMHcGOe29BH5qjedwbP7buXWIiXCeooA5I33IQ0Nc1wxYRCdzVqCXOPa7kPx391lng/M/OZ6fxO37PInrMIbsML7Deu5yxAab2PZZmF84Xz5g6WFeZ0d4GIKNdgALYFq0ZG4fYvqEYDC8psKvybwNXn9Bcjckn0XmfXCuGOydiO3YkFOr8Xw9mQsfbJg6zDUO+DjXSeyWkXk+co1I4lYZ6wqN/RrySV6mJ0hO0oiFdYZWBTr/WaX27WoyFVZt9aaqkojvE/l9nbFtcXYIkDOyXj7RCGqYHSIz26DJeKVZmiJLCRnYLyjlk/GDGXSzwKtZKJzRwZ4HZyZarCmAFBWy0oJYqob/ZDJxRtYhWRGy+kD8RMhEKkgkNiB2knEfZmOwdzU/mZQMshQgZThTTGaokJUgqwy+ImqSNSKrU4bML4mMsSdwhxkW/UWkCnt/asjK4VUudg+oQGUGKYbZM5C0HntSFC2SVXfQh+2wEseuvmwJ3evBCuKVOHgMCpESE2c0wAqbzZABaoYVJ8wnG2oShDx4NrAqbjXMssAqjNb+zN6S1Uef2SvGJNa2Mx0px5E8WdGD0S1kGofVP5Q5WELQXLLSBaMMaohWKeIhTOKRMU8iEkUr2JMQngAalSFjQA0z6HiGqcezeoBEZJJhtfBqJLDuOlkBNcwkW3gmjSBKPDMJntvB0QdjklUMFEarZAYHZwkB+ZLVE1+2GJl9UO+QuYGVcpB54fA9eIYyIagOzyRBBhBWN+BZbH6IhpohQ94OqJHMnuDVcLCCAK6D7AOsdACkBTMWJ+QJsDXmyAyGixHU96t/8IxrLwWO803W1VO/ZnD7pCPPGmbxFLD2sA8eaK8g3eHxTBfO8MNKIRNHmJ+VJmtFxKjKcvboGSpkTzoyywkRxZANw5EbtsL1YCHxzBnuMx4/rMT4ZH1UnvDfH7xIgldDAcJaf1Z9kYzakMHD3oYhq8xELKfAp1CB/pokEneTzlKOQ/bWxjMaEAmAzJFZFYhwTLKqEFvhq2JCv3AmBfQExn/hyTOAJy6eSfoyVu0HWezuV1nsb9FZiiPNQSfx9pNJz3DkhLN0ZAWLS1ZvAS3ITClEitSwCo1EzyT6okhmzyaZrY7IIqBUq/uyYoAaZLAjkZdLVljkgKQvPZ4JIRkejA4tPONpDtmDIRv4iWqz5msmMSc2DfPKIatJ0XNZEn1EnxnyHmaswLbglaMYrYp49QmmiYJn6yBLj3XgB96n/aH3+SUuK/A6VorMjYLm4HwSR+blTFhjgz0I1nhYRwYxJ8kV19i/YlyLKdEPudKO4EuM3zm8bkHBaw8684ZzkySnRtbmkvxgngJmaMncNV7jRvryBfMlEEtUZA5qWDtEctf0Z90Ejk06HFt3A37C/v2z5gQwoYlzAEgj82NPYjiMfT8+HHkRhA9hngjW2GFJJ1gQxw05zIVhrQZcBt4R4mHmN1P9e3SJnge9BX8Ho07IahlMAYyWaBKlmBjVkagJZhIvJIMPM4WOPqyk0/HqC8h+rQnK54jNw5E3hUbVmt+OBs/4qPQwS4NXgmD/CG3gEWJE/K/kCeoJigh9+8+qfX77z5LQ35dyscLPHPvN7zkM9T7f/EoJ/4MfKeH4nz8l9b//73/9hh8V+ssraO635CdGzvbSettQq3lSiuhj7dxUdRP07Rih/+aeLPpoK1PragIXSOn8qti0tLQpM4kWy/exuFZHWxRXS212cWi9ocSo3c+cu+s9lNTfRlFq33aNqx0W7irdpfbjsvdubvM620aR25fVbrJOirfU+KhL2qNcG9WEWz9LHcp7xEU2We8uE1lHJNey6b3W85YzkrOATnIzoeYk+7GR94b5lB5ZcAxsJ3Lmode6yrNJE3VB6Xrmty4/M+5Ho1loqBV2ulnQRvXo3ybam60Pj/3bvIg3Kpl2rIOsEZfc6gl1MgWhnWveJXUykOZlm3S8ju4wU5qJXba/eAvpyB1R5HGPY72p9nnlG4XiRVc7C2Jd5x4PN5zYp/0tfZv74/O09Se8ME2D+bR+GubpWOa7ubjdUamU6k3QPXLRvm+9fGs7wemxAQ5li8UBPXp1UFZ8VygXTxX3q8xZIPHRuMVbqGiZN53wqpfKbYeOMZtwsZIUXzreQbQ1JIVS/WILOUbKqEmPCVU/0ZdAnCNiS7sA/cfWUbKljKl10DrWOlw2L6uR0jtPdUcx6tyZs5J24qzfTO5p00Tim3oaxaw+H1t0byJdHqv3RTlIjaw3+9XhfeJmrNTXwJ3NafMQmlORLVaZa8eiBtIlHrMddVLFzVRE/WhgHKXB3C4NnLs2O+3agwzSaF85RZKdZWGM7ztRO5fW0Cgkf6El3EqcICt0
BclkDjt3K+0XeSCc6JNbmLyaHO3QXOezqekxIhe6VmNVElL4Ddt4c2Ujts5pfzpoc+29c4LDOoszPTtmx/Z+pKb0Jt7MULci8e/40Xa9aTtNGp1vFH8GG8VoJ+TJmkyVadvUElVpEi+Q9Jcoxqrs+EutMcVzXSJKLSlju26DkwycixpNRNckB1G0jeTLVteaz1ZW5QZthWxFC7EYiQtJMnVNXKuyOGyThSiRrSytrHanoOOSpH+9F28VWbJhKz2jTXVB/V0LIDsHZ4u+I4eh1d1K6kVXuYnPuaDVlCFLp1LNGlnKUTxpB+jZuK+NHsfzwn3Vk+5xiGRqztn6kn6H+bRfTNKuF5oZ0xkr1HYBQ1TldaqflCQ5SOaD696TxuPDa2DoWyOT7JPi6hq1Fm2p6Ep2XksvNK6LyyasKJW7ZYmIkOwlsZOlmHuFBf/aFddwGYM29lGue7ZtPDo6ms685cJ+2FXY6tdGd4ukXyiSvI3SyBPW6GLwiL6m76xIUt+dmGluk+aM9fIN+i6FdekZvnkJTHWpIVZKll8clwehnAaCbLphpO7OLF946tkrD/JRMJHSSM+bYPd7KC2Vlt3brUpG0O7yS1FmTXjaKmZAvS3kwLT0WAkBVXCcrEwnWjB9O+eFV999qkkOdD71NX9ZgQG4lpudFnnQ2eQ0Z7U41St5eUpu6YSxgVNRVolT0dlwRfYUmimvzl5I8iRh0ggNH7KzItKvNXrg4o7+O7LBm33nvVTkiadmnHxcX/P13D973FG/luopdLnl1Qrjub64NrTTcdus0IVJFDlTFSwWn536oH3Kd8YQHgdJKBhkGBQmBd6aUcOia+aBiWRHSjnL14t1/ly81OwolXFgqcYhoGQ1u0bb9AaGrr+r9+lKlVtz4ViOjMwAjw5PKuERavIx31TzS8w5SvQ2jjMGmGvKJ3SBI1OxIdTvxzSZ8tYq3jNxCO3p+50q84VQrKbrvsi1B3VO5zJ62j271Sf/1ZSeuI2du34KjCWLPMI+PMdnZ7NgEYTeHlK7Xp2Ce5CZhwfnSVOV37jvpKXdIzyaA649aZBUt0/cQzTPIko9XCZWiYgYPcUZZVxtntEeYJ8MA5wXLXIno5h4R8p43dPLazY3b/6bTqKJNrF5mfWftL5HtoEy98Kasm/tWgpLSte4Yyff6GSVPgtbjZ4HpJ+FWNlGZVeLW5DTZS6q3STxzYbe8Jdzd/E4XUv9LtSrZ1fEV2rFlUbi9NU12IZJ014Wz3q2WFTxMZK3YSqm0f7pX5X7fdvNUT+FnrHt5GCI/eLy6g80g9B6nG6QdGmy5B5mCETudmXBsXnopeBsZZGrTGSyL6A/Xd0yRSGwj7TKofD3Wrj8ukhpo5DZoOqe3l54sRQC6ImnLa3zenN73/yMSRD1bL9okL4npeWA5trX1Fp5riAod/GlK5l6M81DZImvlcjJbZofUfO888pdNZWpxniLj26dRu5cmi6ZzWXdadF8dbZXClw31bgmFWsmfvL5LNQAGdBcM0VGYScLO4UqZhrf8c+pWb6QDbqku9AVFNZvmZthKFsnwVjZ64JnU0ura6szwr5CbWBQwkwXG7TZs6fWPzXuYbvT+P0TJNOctS8s2G0086zHW3bV4qDmYe065zosT7VQdJtZV1eZH98EZnUQkmZ/ltRXDb7W6RfxeWOBsvZqaSVz4RqDzZx6m2p5lTLKoBYXgdpvRIrZpiBhwXm2e0XC5Cz5L9ZF2ieduf38Zk3UTkMexyrz9NSrz4N5DECGhZj4pJopFe4xiczp3pAmpx3PyP7tdKcE8OAWx67Tw+4mb/vp6zq/tSmKW+XLWqz8QFzPZQ7ByJcDBEB2SMoP0sTW5+3qmNICsqOB1kaS3vFHBtkbKTsa3tZ756v41UyF9WwdR0etlvmlci4Cf+7cvN7npNlrc2RpU37K6V7Wjm/QNPv85A9XLDlJJKqS33DIpsvc/ID0oGwDhD74KducJlTMuSrDwMgaLTfmOLLRtLSZTcBbanmOyOusDpXCRtsMcOpxwR6LcrqI5PToHTdzK+oeCcQRRRsDGxjeh1bWbzOYoO1CnyPbsU5Pska5WjnlfLkoVJkNKbEK/fUuUOUu2yH4qcrc44p6lyblSq1dXz48rxthtaLo540W2lB1He420dqkXDIM4GekXceHmuqixNlLq7ksRPDVZ+4g1y/kKLXdVKE47XLgfF0JdOV9zpdzXWv156t+38NzaS2QGW505GmNmxbVrzd/iM3EVnvlnicHRUrO/CKJ2+Ny8TJtTBdJpCLXTvWo9JyZHShxQe3RGLUc+dXkuMye+kw5rZAFohJ3+d7MZlXxPvgoiAoTdOBZTa7VgTHzfLqpb2rvI0tlL1/pQ5Unc9d9hkaVNpr8LINUfO/PSqtM33Smu52aB5L0sqSLxL/BZ/Xu4/pAin5eZQ8EFBr1JAmbLQUguDmVD66IZmJ20EN6Zigzji60tELePpC0PbNR9Gun87PdVdYlens5xIbt1/JDzphH/ATHE8l245j1Nr6l0ZM7OKwsRSs114DKxt6+BCVGRJRR0PT5ILeubVzF9Khzc/3Y82xRzpi1bcdRBKq+QH+xmkkRMsGJrwPucFXFXGqVjoCWncvbYyI2l4DTVTHJBTAzwuo5jSRB3nqy7e/Or3urUTa6o9QUnlsybbXgnynD1HL6TPWrXWwcENjz6XliN3Z5lKpNkpRHbrf0q1fHCPzxGnvM5Pw0D70Zakg3pAstAmLRKyU0j7z5EIpgr8SGHyjZZnPzUBAdCdqz6I5qVU0Oa0c6IzMZLdY192aOyaRa1QtP7TfhTOpP0u7ANXHtioaGUJUI1RTbUNUa1KXlLHyfZu8FzxtNa7OL3N4nq+5YGtPpUmbfzlGS4m3bFUk1ZZf3GERMW+TrDukRW74vE2/RAzvQn1Y8+bLnJptyiQBJdY7mh5linBmLYpaMZTwNNRSe8vxVcsfWZWhlFrf983ydhOadNlpKKl/7+uF5LQqE2i5XprPQfELT83PvLWfSY0+XcTehsnXMImNmnRbpphBciFgPyK2Vt8WkmLGhtwfPTG9ckTl7fXbv9OjFUeebcWrK80ydc/wN4Gm8jme3lmOcYl571s46xzzIrPmaGkw4NVYT3tTC7WTDHN/13Z5ZfNVLaeHk1VKjfQitzD4tyn09e61uMbXm7taB4RbHmT4LX+taml61JTKGjYQiAYHiWuk02ytIl4PDC8DPWkmQNodHS+o392JmFYuFwh1270hV2yKkn5Ot0EV89zjJ9OKlIEh6RPq3vsh7eroweT7T5EmTvPxDfnPf5Vl4nVaRn9rViunv2vP00nyaNcAwL95eWTe+px8dDnyZsjyu9f1+NTk8p6fkztM00uqZxRg9pbQXa3JoD0IN7/fSnmY0nUjIvzTGPFYpSkLIn3NXjzK3ODd8T07UsigSM9Rfc1nIDvvnM0zXIiBD7+af5KmuoSg6u0hnFEKcd4vnDkXJjdEtrGpudkjRFuJkLharQCnUWJO7anUJ6Ou7X5TzbMJrr7lyu82ft9cUdRyhcOopSOUbWaJma74
pYz8Jbv1tdZo9kwJ0N63k+2yiQprIfnomc15fwf4kXB68z/Zae5wV5N4mcFBdaXz97n0aWbvM3hRKAMr8TJdvltv03NnmGrqpXFtepi8xWIP93vYQ4EGwLtTxUaK7ZVQu6rSYaFg8TxOhMR0DhW9UKlTre/7i6NnqtXWr2dzZnbbGU3s/zmlwvvJ8ThsPKva72qeMiz19Se+zzBQChBRCFiFsIZVUrfJTI1pEXQXhwNls9yDhKLJJjoEa3sP0qki5ZObMrd2LdqUL9ssTb7fQEI/xNZbECjmfne7O4e7a48CY+TeBYja0ZwLdfaOrVXR42trIYPCcTre2jYD8ExnLdyG1kSFshSIJHufkCLeLl+PBXFPcG+KLPX93UTx2fqwm+6oABBZFx1Dszg719jiN840TjGKTrd8W/NSQNJcTdd4gxtmmmOxTsdLnEWbUdN8bWaqfZRTP8DS/XceykQShyNqvXvPK7s03x05U5b48UCaKE08yLyIbTxmu2CSuiFrCW85eHSIFwT0pLp/HbRe0W/GlSvtu+p7lzfOUs+4K6eBjPaOmGw8ZPOn2fp4dh9otoJ/NYLtoVr6v9elkQVETq3nvNdFl6YxPDjlC1aYdpvaTh2s9YbsTdIz730p4LTwQi6yVXgs5MrXdYyZbau7TMMDXlaULDkZuiOuF836/a+HKn5XJfHF4FZxz3EYvIYFU0FRgu/cush893OeUB0umAd04TCrUV2/PWZz5PBXxRNuhON0RM/l4lmwRaV6T2xLyN9lhCYk6b9mtgnhOdddOxvH6Uu6C7oboS0khgjD5VWGbuxflhUz7KGIR50t0F5JEUeV8p0n2HiOhYGnC3jeS+HKMLK93b++9k6JEv90TQ0rmc/E2YXfW2t3cxKu0cndTJF9OLDrrC4K5tApISm4U+FkgzdNumnLvd8k7td/laboA7NykkW1cpMwpK+T7c30Vqvu1480jWX3ep4h3ojzRZtSmKRFctymEAmV7K6QiM11dStUL2wnF32XPSKN6yvNHd+vdprGzA0Qla7KvdU4j/9UJon/yUWfKNG9VwLCdzZcBzrdJy63Lq8/LMkkSSEkPaeo/4kf/Z79I2POf3+j7NkPP/DpDz9M/T6f/9wx9k0X84ljsz8X2qbnv0PO55Y/fkjS51vCT7CX+Ma7q8zrJyeMFr1mU6K9f0elzEP3ytFy+nlmMlIqy4ubbSyfJsMWth7+YBfhcBc/8B3kissgUTd9bcs93rUTP1y1Ku3/cy6pOnnH1aRXRIPz+SQD68FA+h7+bgEDMq7/7kfthZiFC/IcfbfvVlEORnU7kl+li1NkgxE3BnMMdfosM84iXfuIVaOtVl9XXX8r/4Q/X/+D1I9Dj4efsWGbY14Iiu4LsOEFaFsFw9JtfzddE+PdljL8SzN8q0/98ymny3ZTTb30Xyfc/fvn7ye+PfqPt7y+/90QKoss2rupRcrHk4s8fKLm/epUk/2vJ/dGrJP84wf0Nb9H5X50a9X7b1Gi9W+0/U6MLhB8fepbvH6quuY+aWj2ULtifqZWd7g5aq2mNody4ap/S0W2xpyPJVRt7am+76LbdRvfpYbYsXc+UpudZvDi/o3O0Vtf36/1enjhOfniu/1ikZs/s3XaqlwD3U03WKP96fN37zrPl22vzXOSQiGIve/76jvkNFb8vuXnkpPXyyF9fHkKhrbUOrxXP3/rczO5L+axy02MtahlkRHjj+Z62z1tSmWLfUP7EcqfRMvWNcNex5q4KVm/Vjdzt1orKg5lxz5sRcQ90o/sIZhPn3cs+RQfey+V7eaFWfnJdZktpu7WvV0/Pd4+7lumXg4ATADBluLY2l0tV7JvCXIiv0tYPFwiLF3haNGaPN75rLtSqLZW1NntDKCD68ouS82P9PrdCeFqLi2k/RTHte3mYVIoJYLSgr6nPixcjbMw9veM3bGJWJ38tLa9Nv7ycGyo9NDEk8KQ8nJbyOTyJPf8MPdY835uZWDzvK1YUF+lU0pPQOGxOwok9CeGbiSHQO1Pifkt3BcRbDn+lQKLm/qVebOHbQhchxql63eNgfxmp6TEi06Bolz2Irl4WbIKvPalXa2ufUcDkb6Q+YlG4pa2OcGavXlV7v+WYF3vyPSF8pSvR3WvM/h6+silO7RRdlawggc09dM17xsxKv2Q9fROb6cunHqft7dTexeTPRqh/zidRqYvOwdxzxqUcsqOr7uyqB8bPfc3meeYdTd3ODN7CWzxBCLlbH8uLOp3baq9K74lzmm6RQY7Ck3q/3x7HZSSJh/VaN7dTB+bfQT7vXbPS5Vlqp+KxFffqdr4VD5LobcvDHjID3L5gQ6E9biGQy4uZujUFHnkH8QxpkpV1SIO+vir69PSm1aOImpVObTqfYbk5ms9pZWtUeJsdz7ouc8kq05UTe2E7ZsGuC3UbFc2Lk4+3rJmH8tQAGx1B2rOePprXTnwwc93WWtFLPK2iC515e9GyYy+xaDQFRJmBHNr7UFmv5EoWmwwFzXKzUl6yiHb7LHmKiblw1jVl+e1h1re+qaCQojzfhGl2aEJFT4ymNERdnlDdpNol6Fn2PrHnpq35rpaoUqxKvipFrob+z1XJ0yRfSY5vareUWkXMbSuUBf+ACJfanuhfrFOGHh17knoo18VCvU13sjo9MtrTn5hhNjvmVmsnYmVck2C6YJKK0sRlZdRp0Lw9qRcjpUgelMY0FYOT9Y0UIN5bImS/VWbTzrmZ5/e2K9LPMhA9KW/iUF7msVjQxirjimaa9xZopnWZJ+uz2G2SxXsaJx2loTGUquSoUqBJeyQHqnRF/2vSURL3iMBbe28DpY8+0MBDz45s7YLHHdlzDdMA0SNgODAJRhW/LAP/1KnjLtE1cYn8kKTAudBzo1xd+sGiNxlfXchqxj5O9zhHF0aLoklul9v6xFGSupnDNDU92ebbuZctUtDyS5tFk7lkBMqjDfObEmXW3Xw1dz31Q+2dJQ8xiq+qkZuzbWDuNDtYdbPGrc3VeSas7u/VTs+NpLGvDvLY+qKN/cJ8KZK/3wRcaR40vrMbrjJEf6v6/vbaumCI23dZVobKzs5dd31vcooR3svby302Kb8OZZ46LeaX6/p4LwrjMj/b0ZVfx7tzxBRNfXtPrvN2Y9AwcXh6KaaReZdldBDb/fuExG31Ynbz085Hfb2xs9ujOUOS6hr4G0aA7OFC6/w28JcpT+WmqFVIXKN+dWiEwJvnpiBIUe5B2thBcqu20ewSsDeYqKdf0S5yNOz+GbCPa2ri9hMm2uFlJ170nBxFw9nLb7k3Uuq6E9wmSIK4vBazyS0P2Yyr+iyiNU8Pee9E6am5V+owEW/ruXTsVS06qxeBOc952jr1dTHr2Wdpv4SsXNHcaWetmWm/L87Oio79zJhPklg3Fny54hhNWKt7/kaVawMkNYo19lwUO8UJ015wN8Ij1I/XV6rGLQPTA1JwZJ94MYHD08fXtTgFJ733D8g3iUgExYOIxFQU7Wu76ZVJxF24BCks+lvr8nMdPBmffPdgwnvT7cQXtdy5l7e54ir9bVGHd9tV9e
bii8j6QNIELyQxEmRiYjwtX68UpTPFNClYNe4XAptVZ7qkmv6+YZE9jYQ5L8675LrQW+mwuPMOBpLiiuJitg6duaaEJ1mfZQe61eKspWY2u65MyqqMRJH1jFtbvsyJi1y06SYKn2YvtvLBed+KuZj2tGAyyWsNv9JbMqd5GnPMsThXVhpFVm4hZ1zErBa/ZWQmY/EeVVHDL7v3/ei8j6+NGW+TrdbAsghGY8v+/u7pHO28230jcsZdPmiNnqzr5WEhSWiro22arKtFdJhLN7VVz5s7FZ0YXe4mLgN3Ohu+2EwYT+2npu6859SC4cS1tllfIwpBuwm3QeZcm0lxVAlrRJCm71cyTA3n173v3xzvvPTCFY/MrdU4cQ227ZYns9C5L/q7YUmn3u+1GbIX54hbpBX/CteHBz9PTARX00hsRSQvtjrwHewf/CmqZEutCH+uFGFxgD9ko8WFKsEfXIgsFL6ftPFVbsj9Qxtwvzq0AfcPbXzu/7Txt+zDRHrejztjFdr1PFrwW3Xni5fN+aUtO0FYIRmV+ubsy3UaqltvayJfLv3V+OU//ujy/pxIWvZaun7lbKb44D9L/P0B0SdNw0uffhGA/iB1wgs//+DdkOjo929m+/2i0B8t0P37p0/G9N9fkv5jv38d/Kfdvy799xveb/qXZ1HGVzSMr2gYX9EwvqJhfEXD+IqG8RUN4ysaxlc0jK9oGF/RML6iYXxFw/iKhvEVDeMrGsZXNIyvaBhf0TC+omF8RcP4iobxFQ3jKxrGVzSMr2gYX9EwvqJhfEXD+IqG8RUN4ysaxlc0jK9oGF/RML6iYXxFw/iKhvEVDeMrGsZXNIyvaBhf0TC+omF8RcP4iobxFQ3jKxrGVzT8Pq9o+AMKZrgfvKLhT30tw4+rZiZj1cxYNTNWzYxVM2PVzFg1M1bNjFUzY9XMWDUzVs2MVTNj1cxYNTNWzYxVM2PVzFg1M1bNjFUzY9XMWDUzVs2MVTNj1cxYNTNWzYxVM2PVzFg1M1bNjFUzY9XMWDUzVs2MVTNj1cxYNTNWzYxVM2PVzFg1M1bNjFUzY9XMWDUzVs2MVTNj1cxYNfO7Vc3wUDXDzr5+/voCGmEsoBkLaMYCmrGAZiygGQtoxgKasYBmLKAZC2jGApqxgGYsoBkLaMYCmrGAZiygGQtoxgKasYBmLKAZC2jGApqxgGYsoBkLaMYCmrGAZiygGQtoxgKasYBmLKAZC2jGApqxgGYsoBkLaMYCmrGAZiygGQtoxgKasYBmLKAZC2jGApqxgGYsoBkLaP7rApoJz//6tTPfFtD8DV5BMx0raMYKmrGCZqygGStoxgqasYJmrKAZK2jGCpqxgmasoBkraMYKmrGCZqygGStoxgqasYJmrKAZK2jGCpqxgmasoBkraMYKmrGCZqygGStoxgqasYJmrKAZK2jGCpqxgmasoBkraMYKmrGCZqygGStoxgqasYJmrKAZK2jGCpqxgmasoBkraMYKmt+tgga/guZvUDUzG6tmxqqZsWpmrJoZq2bGqpmxamasmhmrZsaqmbFqZqyaGatmxqqZsWpmrJoZq2bGqpmxamasmhmrZsaqmbFqZqyaGatmxqqZsWpmrJoZq2bGqpmxamasmhmrZsaqmbFqZqyaGatmxqqZsWpmrJoZq2bGqpmxamasmhmrZsaqmbFqZqyaGatmxqqZ/7pqZjqd/vq9Mxz19cP/5RU0DPXvK2ie5et2ggoXhf6JlZo0q+PdPYjgbPMMoDgmrYvrcDoIq/L6qmPxGe2yPv4c/brHod2qfpaX2BuGzcCRNDiVzVBZc86u193wfNQaDAAfk8tr+cR9YilKYEIBHU+ewSlDTPqcu5W3+MsTvjv4h/GZon7BZ/ZD1W8YO/0BYz/Hfn+u0j/g6uRaAx1LNFDEXsINdPTxKuE4/fUrOn0m7P16TC5fzyx+olNW3Hx76SQZtrj18BeS87kKnvkP8kQRXUDT95bc810r90QKoss2rupPe2j04ffPQMfIID6HvxNXxNz6O5kcKrkixGQ0hl+XeBXZ6QS3S88YdTMIcVMgifcyu9WYO7z0E69AW6+6rL4K9g/l7FtZ/RxCPR40gGWGfS0osiuIjBOkZREMR78Rcg1//kzB5X+b4PJ/mOAy/wMFffdb8hMjZ3tpvW2o1TwpRfSxdm6qugn6dozQf3NPFn20lal1NYELpNS7KjYtLW3KTKLF8n0srtXRFsXVUptdHFpvKDFq9zOn3q32Supvoyi1F9FOfuhZvn+ouuY+amr1ULpgf6ZWdro7aK2mId9646p9Ske3xZ6OJFdt7Km97aLbdhvdp4fZsnQ9U5qeZ/Hi/I7O0Vpd36/3e3niOPnhuf5jkZo9s3fbqV4i5ZRSTdYo/3p83fvOs+Xba/Nc5BN0gr3s+es75jdU/L7k5pGT1ssjf315ph221jq8Vjx/63Mzuy/ls8pNj7WoZVt0I28839P2eUNoQ+wbyp9Y7jRapr4R7joUJlXB6q26kbvdWlF5QGDseTMi7oFudB/BbOK8e9mn6MB7uXwvL9TKT67LbCltt/b16un57nHXMv1yEEwbRI/RyrW1uVyqYt8U5kJ8lbZ+uKDGzAU6xy1i9njju+ZCrdpSWWuzNzo1F335Rcn5sX6fWyE8rcXFtJ/Ke/q9PEwqxVyiawr6mvq8eDHCxtzTO37DJmZ18tfS8tr0y8u5odJDE8dIPqU8nJbyOTyJPf8MPdY835uZWDzvK1YUF+lU0pPQOGxOwok9CeGbiQMwaJS439JdgW4XHP5KgUTN/Uu92MK3hS7WUEja6x4H+8tITY9RI4rKCWSNPYiuXhZsgq89qVdra58LqfU3Uh+xa3Tn6ghn9upVtfdbjnmxJ98Twle6Et29xuzv4SubIuWVLkVXJasjUOqha94zZlb6Jevpm9hMXz71OG1vp/Yu4gf9v/dJVOqic0gJtIxLOWRHV93ZVQ+Mn/uazfPMO5q6nRm8hbd4QiZZ2q2P5UWdzm21V6X3xDlNt8ggR+FJvd9vj+MyksTDeq2b26mzQ42CfN67ZqXLs9ROxWMr7tXtfCseJNHbloc9arHn9gUbCu1xa6CL82Kmbk2BR95BPHPARuuQBn19VfTp6U2rRxE1K53adD7DcnM0n9PK1qjwNjuedV3mklWmKyf2wnbMgl0X6jYqmhcnH29ZMw/lqQE2GuFfTaqnj+a1Ex/MXLe1VvQST6voQmfeXrTs2EssGk2BvL4UyKG9D5X1Sq5kscnESpeblfKSRbTbZ8lTTMyFs64py28Ps771TYXRuvJ8E6bZoUGReWI0pSHq8oTqJtUuQc+y94k9N23Nd7VElWJV8lUpcjX0f65Knib5SnJ8U7ul1CpibluhLPgHRLjU9kT/Yp0y9OjYk9RDuS4W6m26k9XpkdGe/sQMs9kxt1o7ESvjmgTTBZNUlCYuK6NOg+btSb0YKUXyoDSmqRitEW2tkQLEe0tcIZqozKadczPP721XpJ9lIHpS3sShvMxjsaCNVcYVzTTvLdBM6zJP1mex2ySL9zROOkpDYyhVyVGlQ
JP2SA5U6Yr+16SjJO4Rgbf23gZKH32ggYeeHdnaBY87sucapgGiR8BwYBKMKn5ZBi4vdtwluiYukR+SFDgXem6Uq0s/WPQm46sLWc3Yx+ke5+jCaFE0ye1yW584SlI3811xDenJNt/OvWyRgpZf2iyazCUjUB5tmN+UKLPu5qu566kfau8seYhRfFVRVDnbBuZOs4NVN2tQyLw6z4TV/b3a6bmRNPbVQR5bX7SxX5gvRfL3m4ArzYPGd3bDVYbob1Xf315bFwxx+y7LylDZ2bnrru9NTjHCe3l7uc8m5dehzFOnxfxyXR/vRWFc5mc7uvLreHeOmKKpb+/Jdd5uDHqOen96KaaReZdldBDb/fuExG31Ynbz085Hfb2xs9ujOSO10q6Bv2EEFGxpC63z28BfpjyVm6JWIXGN+tWhEQJvnpuCIEW51wOZkdyqbTS7BOwN7U7pV7SLHA27fwbs45qauP2EiXYaWAIvek6OouHs5bfcGyl13QluEyRBXF6L2eSWh2zGVX0W0Zqnh7x3ovTU3Ct1mIi39Vw69qoWndWLwJznPG2d+rqY9eyztF9CVq5o7rSz1sy03xdnZ0XHfmbMJ0msGwu+XHGMJqzVPX+jyrUBkhrFGnsuip3ihGkvuBvhEerH6ytV45bRQdWDI4tQvGT5Dk8fX9fiFJz03j8g3yQiERQPIhJTUbSv7aZXJhF34RKksOhvrcvPdfBkfPLdQ23pm24nvqjlzr28zRVX6W+LOrzbrqo3F19E1kd8iTL6ExMjQSYGRDbl6pWidKaYJgWrxv1CYLPqTJdU0983LLKnkTDnxXmXXBd6Kx0Wd97BQFJcUVzM1qEz15TwJOuz7EC3Wpy11Mxm15VJWZWRKLKecWvLlzlxkYs23UTh0+zFVj4471sxF9OeFkwmea2hMr5kTvM05phjca6sNIqs3ELOuIhZLX7LyEzG4j2qooZfdu/70XkfXxsz3iZbrUHuWmI0tuzv757O0c673TciZ9zlg9boybpeHhaShLY62qbJulpEh7l0U1v1vLlT0YnR5W7iMnCns+GLzYTx1H5q6s57Ti0YTlxrm/U1ohC0m3AbZM61mRRHlbBGBGn6fiVD6iK/7n3/5njnpReueGRurcaJa7BttzyZhc590d8NSzr1fq/NkL04R9wirfhXuD48+HliIriaRmIrInmx1YHvYP/gT1ElW2pF+HOlCIsD/CEbLS5UCf7gQmSh8P2kja9yQ+4f2oD71aENuH9o43P/p42/ZR8m0vN+3Bmr0K7n0YLfqjtfvGzOL23ZCcIKyajUN2dfrtNQ3XpbE/ly6a/GL//xR5f350TSstfS9StnM8UHpeXW5dXnZZkkCQRcQxD2R0SfNP0z88vflflR5oQXfuZ/EIIKP7OTPyoKZf8n0yfR83WL0u4f97Kqk2dcjUkUnEQR4d8fmERhZ9/9NtIPZJhm/tQsCjfK7yi/v1V+BeaXSUDur5dffpTfUX5/q/zO+O/ll/6r5Xcyyu8ov/+t/f0b4AfhL5Lf6h7c/k8i/IzvVyRT30gpafL3Et5rfK5H0f0iuvRE+KXpZSc/+lnQyZ8qvNNReEfh/Q3Cy303+c2zv16O8+dK7o9+zXaU3FFyf40Yvv815r/Y5rI/WkU2Su4oub+O1SbfAQb+LwcM7P/mYrnVK4yft7j+GqeFzx/I7f+PIzdGmIWnyR9ph783w7NfSfKfunqO/dHqub9Okv/59UNvoi+8+nohO4vh3y+69e+VhPlnKY3rq6px/1HL9D9VgVF//hr9+Q6A89Nf6c/sT9WfH+Wd/x9ZfTr/batP7673+Kw+ve0aVzss3FW6S+3HZe/d3OZ1to0ity+r3WSdFG+p8VGXtEe5NqoJt36WOrzrQlxkk/XuMpH1Kyymm95rPW85IzkL6CQ3E2pOsh8beW+YT+mRBcfAdiJnHnqtqzybNFEXlK5nfuvyM+N+NJoFLOtkp5sFbVSP/m2ivdn68Ni/zYt4o5JpxzqKXnPJrZ5QJ1MQ2rnmXVIng9KOZZt0PCx8MVOaiV22v3gL6cgdrdy4x7HeVPu88o1C8aKrnQWxrnOPhxtO7NP+lr7N/fF52voTXpimwXxaPw3zdCzz3Vzc7qhUSvUm6B65aN+3Xr61neD02ACHssXigB69OigrviuUi6eK+1XmLBS8AvUtVLTMm0541UvlBgsDmU24WEmKLx3vUOehITwk1S+2kGMwNtJjQtXIMmmBOPdh1SEsQGHrKNlSxtQ6aB1rHS6bl9VI6Z2nuqMYde7MWUk7cdZvJve0aSLxTT2NYlafjy26N5Euj9X7ohykRtab/erwPnEzVupr4M7mtHkIzanIFqvMtWOEBuFzzHbUSRU3U1gc2cA4SoO5XRo4d2122rUHGaTJilTJzrIwxvedqJ1L4xVS/kJLuJU4MXL1CpLJHHbuVtov8kA40Se3MHk1Odqhuc5nU9NjRC50rcaqJAQRN2zjzZWN2Dqn/emgzbX3zgkO6yzO9OyYHdv7kZrSm3gzg9WMf/IKit/20Xa9aTtNGp1vFH8my5RPa8VvMlWmbVNLVKVJvEDSX6IYq7LjL7XGFM91iSi1pIztug1OMnAuajS8ngpWqRjJl60OK1bIVlblBm2FbEULsRiJC0kydU1cq7I4bJOFKJGtLK2sdqeg45Kkf70XbxVZsmErPaNNdYEFbwLIzsHZou/ITGt1t5J60VVu4nMuaDVlyNKpVLNGlnKON+wAPRv3tdHjeF64r3rSPQ6RTM05W1/S7zCf9otJ2vVCM2M6YwVLrGGIqrxO9ZOSJAfJfHDde9J4fHgNDH1rZJJ9Ulxdo9aiLRVdyc5r6YXGdXHZhBWlcrcsERGSvSR2shRzr7DgX7DicRmDNvZRrnu2bTw6OprOvOXCfthV2OrXRneLpF8okryN0sgT1rDeDyRW03dWJKnvTsw0t0lzxnr5Bn2Xwrr0DN+8BKa61DS8lq44Lg9COQ0E2XTDSN2dWb7w1LNXHuSjYMKy+udNsPs9xDXSsnu7VckI2l1+KcqsCU9bxQyot4UcmJYeKyGgCo6TlelEC6Zv57zw6rtPNcmBzqe+5i8rMADXcrPTIg86m5zmrBaneiUvT8ktnTA2cCrKKnEqOhuuyJ5CM+XV2esMq4QnjdDwITsrIv0KS1cXd/TfkQ3e7DvvpSJPPDXj5OP6mq/n/tnjjvq1VE+hyy2vVhjP9cW1oZ2O22aFLkyiyJmqYLH47NQH7VO+M4bwOEhCwSDDoDAp8NaMGhZdMw/MLV4BaPl6sc6fi5eaHaUyDizVOASUrGbXaJvewND1d/U+Xalyay4cy5GRGYC1m5NKeISafMw31fwSc44SvY3jjAHmmvIJXeDIVGwI9fsxTaa8tYr/v/bOrEtZZUnDv6jPYnS4REBFBUVBxTtFRXEsURl+fWcMVO3hO33ORffZu9dy37jrKy0hyYx4IyIznrmy28Dfc+Yz29QvzcuwNa4uafdL2h96pvi2u1BX2+iV3xbGdBfcne16NFCFR5hv9rt9MOmrkmdOlwf/Odyu7+uju/zSFp2W
rU/Cd1LI4Qq+WoOn9pBhpoZVEi7j3jGW7OWp4d3EIMYPoy2Nzr6udL/APo1gX3dbNrTt6NJYrKTR6344vdo99xq95SRudBu+bqrRQ3bmwjZI7rw5lvxrMe5sbpLT1ValeZWT4eFx8e34sRTr82Jk/ijzs/51ncq31LDLRhK5uTzRT/vytNCc7iEqN072KC+7szTUbqMkqLLzerpJ8uLUfzzb/X62W8XmdHMwDvH8EZ2t+31awv7aZqX4frIcGVX/9KqWspJ55u4wEbOra3bCZVt127PZ7aKp6WZxAGdrGlrmCpN9gvVTPgvlcmmqX4cshbTK+RLq48tBHl1MdZ2Vj8W8+VKl0q+SRXfg7ceT6/saHZVEjJ4fXXKx3pObF8DK9c8Hb7gIm03rbrwc62hfXXcZe8ZraGhmcUhhP64evNLQPphSPnobX+X4EIe9TmugTE7jshv3hnt/aMH7Wl0tPxhPZffQ0/amC8pA1vKWMAozszmzpEu7q5f6o+XeXsIGnQ6zTdi01KhQrqORNQ0SPDi6KNeP/NkZngtHac5F/N1FUaK0+hPxMle3RbTNw+V01tXnD5iZbrt44cQu4vbC+3qboX1Z2unmGQb75+a2fTYv5aRdPrNjtLs2leGymeTzfcd+PcHXBlV/t594sFgr++YlveZ5BzaztZhkg3PnKI2k/qkpzSeGpEwPMMPW+/bsFTcb+070UkOx+jp7bd67eg277AqP493Sw7ayH0t3tYY53NyRT3oqN0v7asRuaz7qNLYzXTGj6/YuNcGDe5o6PixnV3NatV7n3rU4GC/PPI2NLFob456pCRn5CmAAhB3qpMtOw3d6xXB1kJvCjq67RdxxSn2lCHvTOa5Gi+ninQ53r7zVHLfHu3jVfZr6wNpf1lEvuC6qSOu0X5OVKrvmwzzMze7qDSvN3z/05RlnTgK7N6NcEzbd1HpLsQ5uxVqoD72l5tuGtNNCW1HgzvJuOurhMd9u95C3G7i/M03F8AbDZWap8fQIOnXVV1eXW6sfm4fVYjXpeXH5lUAccSngXE9X0SP4K+O3u4bzUH2nJ2zH+LA1u1LYvbW0yLxcbFPdSEa2icaztW2Wx5mQn7apfZ3F1R2S29B+hpG5fJwnzeFQkh9XuVls7DDQro1ukdwGigL6Wayu1Zd9cIyO5g+8/NQ3wFfvtaX5fMHRhFnLkrTuaalFjrV2rPc+HfScbuE8Xs/3fbO/eX1hhnNHeNrRtRs/X299uXMT366se5osrU6y1/vJrlgN+i84NAUP15Di0D848W0RtP21tbtI8wacQBF+NVkNjg+nbW2HwgJJSTh4T9rt7PJeRkribxLxD4+scc6WipumrcnzaleRsFT+4HX4ss1GLwwfm1F2yLvm47Y+GO/53iqs1ls+OmFpp+tO5+V1Th39DT6rCr/OX2Kh74fHLyEUcnvbaU6mEojgfHv70i5x2zgunY3cHlltTb50D5nw9utOd65MLOdcOnp7djadjjw9LXcjP3qaX+ZR+do9wPHEpp8H7nO6ux7ih7YMVLMTD+20C6M8mvun9Q0VkTS6yPJ+aRahPzobh5Wj9ZxVpauXW1sZ+/4ujmGpw2GynX3sxMIEJ5EDuiO0LXfQzRwhtPzUnK4SIz+tNcc2krQJZqY5fLTiTtOcLkw/mu1f96Ir+eITt66lawOlyPr646AoT/PwODhn/zIJMNWwfWzViX9bdbJJktxW2mwQZa9Saeqr826hNPYPd1m5my6e3pINUCxOZm3cle5+NS/rubUbRWvrOJlcFyKCjpvdx6Vc2VnWWI6Dzl6Yybg/fmpvZZU0suGzv7Cryabdqbad2VLLd8/QGHWFqjIALTDd2N1cXNKgvXlv2+++ro/ywlf7qT9PhuXqNmq1Bqb6Dladzm5alJcka6mD+w7P5fXTcSnWkXp7nxqLPpywAH/RvTz0W6U1JreBECTZPu4t29Zor3iSMlC80WNkb5oPs/e6aasiVGSrvSuqx/7c2Lh3eVRIndtr/vxaLAoRCBVlarXaG/cBf7q3rxaDdudrLt92ZUM6jneqMGbetn+YXJohRKxL4dZu137j0lY3izl4ZnkSGsp+UR3vpRO/NGl/HW3z275t9zT9CvJ0N961r4WmBJfec+HNvP1OhznrvlojZdMaDRu6291MGxNl9X7e/banZ1XncAnSbNCVIwit3Opwuc2f7dfwupPG2t1bKlp/1Xbam9f42WmduwNhDPOOiASaklZ0tu25JdbyevkC8TO2ErGaNyuvU03ul7Z36fctbTl7x7ZdXDbyozFtlrFefm1Nuf+yhCRdifU3PplzudV3df3YNRt58oqW6TV83/bN13YYRwc/GyrVvfvYvrqRrI7AMPffi9szjxbOKoATZ3NrsBo78/mwsXy0tsldl2WxqtueMqokqzh5jWWxbD6hHNV9uHGr0RH+JR/1drYkdYTy18Lh1y31tHDzbmylweWSuBvn1TObx+X88dgcxgYow8U12potpyui6OOpsxchxH7Wf8xElJyPyr6X9dxSLLS+0egZl+Hauti7rllmw9NaPr+r/q13bOjdV8+6XnuP66slLlyocOnR7NzewhLlU/ctjeaN9bW6DrftR3KBtXvIzHu7YUOayH8sXGU/PoP9SbR0/d774+7X3hLurQH/aA+7+vNdRbKwdkd/crHgvGbzcRi8VW1SaXtfy+U8C31zcHgZ6zHY72kFAR4E683nbtWRy0F86z8Pl0YXp+e20czdYCTCN+nQzMb39KXJ7eFrGmbtXjDbTkeP7vtrf1jvz7qeyqMvaReVz0ganfzWq/Pem8qlCSFF8xhv4SCL9LT11ijux2UG4cDeLeDM01JENslqbW/um8PZ6qQdN1WuxdzwM6fpvxbG9boZGavdedcxMuF8Zk7Yg08/FxoYs+jalJSJvHBh3KNR+bTFP7cKXxgMXXPkwveFkH8IY/m+dIp41Jw2L8n6a5+s4OPGabV0x5L2hvhirt9DEY/tv4aNeXYBBRbHq41R7gPpvdC6WjTawl1MjuO3B5nGTs9M7F4uHpzvGsn8YGROL8YH1ZpXo+PB2ZsintFlfTremaNkvTFU/1V1F7fyreer0rDN6raUXBEnbk3dEDZeGoVGnoSG+Ev4qvnDZWwJudfZ3R6rabkupsbL7szL1rud5o9tqoZDsQa/xm2pNVnAqcfr+7EPAmnWh+vM2XbJqnkfO61GX5IaXv6ed41QlY96skyFqnb9zcF/6PDeRXM6azqo+9/W5nxZwLQ4Fp1X34zd7uyrbXp2Gslwg6+zKl80uPORMe4H7/f72Tzre6vR6y9fFy1YTeNXM4FUUKuplu9Z7H9V8LngtvRMGdRNoByaz/Nirnma+9hedo3uTMTpgXE0V/uOb4iVl6d+R/ib43IAibrFoByudz2pPJcmxusDs1yXVzG+UmcjJEx6ttT8vojTiylHImIxenAWWsxEw9aiIE/mC6UjgqWGep90jFcwOqbP2XvxnnXixLnek1En6fWMa0OdeeNwcjXOnWE4a4n5FeyMYHwSMle2QUmZuQWM3O6ie+1a92q
WvA/++7Zt9UE754fYH506x+CWCd+fOsONPR8Hi15s2o97awpnzBrdtjTJb0Ku+5JQgaY/bR4MpTU83ezFpmhI+t1cjA7xs6Xrq3C6uLZ2wQwUldk1o24Z5OZfnSD6J//ZbauVFjZo2NLXb2vMt/3nzufIUs1XrhP08p8T9LryiwS9/I+6883/fo7+/+fe2k+19i+vNsnf/Zb+NvXav2qf7f+Peq3yTxfBZwX9VSvo71ax/dWWyb9bxTYDcdMpxPxSzEnfU1ZlR9ssildc3TUA2a8r6bjuT6XYur1H6lbdlrrqlvo7vsRvaD85nrXpfaWT7HpytrlCK7uDtBUh0ahsi0/Er23lvjbq4DqCRuuW8Y7V1dU5dpT1Yq76l7aGqHnLSNwKwemKY7kyQjCPTrLqdaVoJr+ihXyezAZutDyf46NRiM/fse1fr6uvloO9O0PAfe8HLu9Ju0Vxnhx1an25fF7WiyIbH93cSw3FtaLmfsa/u9a/8y7eIpJXafjzO5V/d52XGzNJ4V5Xi245WmzLaDG9rxa6NAyebWzY1z9nIsJr1e+ZqoPDqnc+b65+svr+/+l8czlL0PB6u/TO8dkTEa13DhR9Hivh0emfSjEeBTXET8TPCfxcQWtYt3Lg98Uffg8/I7jcFc9gvfDF+Bf3zWV+iI/yNb50T2KcX1vs0AGjslGNJFan79iU041SvONUOjriyYyD1SmaOclGWV1iZS7hU+E2i//sU4YYy+3hD5/6aWGYrRf6OVoO+t9XYWmtkfLzjH7bvtHJ4R6gbTOC7S1oEQwth0NoRAmtjAsPG07aCB2F9qBuEBGwObAJ8iC5AIEoqBWxTyCNNKJWyQCswbaMITfBx4blclRBa1mf4aEIfiwReAYN2S1bGSMQLZcRZFbmNaBMxmbb0LYWYZy5uDaDIV4IruVm8dB80yVAUQBtfgEmE3LTdDtnIJ3CzcYJsIjNPaEl7ElmCMAT4SXUelgah26JkCuTxwlBroZM0HJXQsgFANQQmA1tdgFOY6gLAJYFDkJeqTE8w3AQcmXDGDMUDwA2BkEry7qhvU8wL4IpldT+lqE5x7o9MgPwjjnBJwm0AU3bsfn3GFvJnrBRqbtAqANDXOYOAJIAZkRAUxyHGppAQCdTqhjoCKAWHYANCDaAFt8Ex9bHCN1GyDRAK397rwAdULHtMrR3XRD8dkzATAImElRFR4AwNb0HaI1KEFab2+tCG15HJ/gjQGxchisDiIFbJWP7X2jZS7AHALt5cB8AnKgMbBWNrWcrm4DfZc7QEYRAVtzsXeW2yzqBcggw7CIEqAZoQ/tjaAtNDWPpmSB8WaG/FTN4wqUG8LMagh6W1O4UwFYEeGKItEwwHAIhMBiaAZwMtQVoNoI8YobFIXCM2nMj8AXa80Lz2emBW3BXBATg5vfYQJYhbopbA0SrHyi18wPxDhDiBO1cFQJP1PB6aDQbMbTEZjBV9IS17VYDbjueZN+vdftiar+Mbcd/Wjo7GjXO95/cAlxj0AOBlAma9eR24So3wc0J6s2t0al1M8E3K5eBmdRiHW1Q3WKdIEJ8bQjEKhiaRYDPgICw+DwtH+cGtUk+cUtabCMue7ZbEMxBzCNoRx8A8APACDD/sPVyxQBBTcxzhHkhwA0hCPUYRgQODKgtfQRAJpjPv4EUEDwAgJyDFMEkxxq06tOarJsLIyTg0GEwVPb9WoPPAoATuLlf/bkFOMCtsIU0gD8DBH7QGFsAq5mvNwG09QY7GiuReM8Y7ZlL7eIBKGAiABxsPa9BeBa4rhFkSfAqBOfy3Adwty/GYWUhlNRywP7K2BIewI3Ygt8uuTmyjNA1gM8E3YLb/GJjZoTRBASjIShIRM2RoXEzgHTSE0HdoPU6QQ9K8i8AnnGo7TTBawoC7jA8FK775GoIGyXgeg2ihnvAhs3Y9h1a2VcugnexnTDOJZgH0FDaJeATzClsWwwQKIDrut/AHQR5HXFdSWDjoLm0y6A5akPu4PhiO2TwEQRcROArAikQhETQEsQkKAiGUxnOVjEUCOF/aLtnklS3ZSfIEYNACUOg4BpIEfgkEwxZ/N6k8UMoB8FeVAZIIXT8ByQPUBONICC17UD76RLEqswZCiURFOqYMxRKYihUzlAo6QeiizB3fB4KQ3YltBPWQdiXSGL4bw2wqnESMsOiAcVQxUFYIsYAbQXDrwKGX2H79Ijbl+clA58kaq+eyNR2HiFvbIsIIgmQM/gOaroNgJewtt0AuYG1jOBRlzAADJ2M2UeJa0JYTW1rbVy7bOdVBG0hYCRiiEnC+AKAiToMUDIILMNAUoSqBDGB6wn2rLEPqgGQMgGgAXTmEkzk+7n5Wg1aRGAWvg+hlBVdA4KTMoarPQmuhugEmdEVCEXC1vEzhN4BHqJE/01QaQYGJQRTAbAm+UaGHIYAd4OG5zUgHuKNalQjJI4IsoJnr8YI4XMIVl1DliyE0KU1tsHFOWMwiBHBjwwzRbgc+HGE6bmkKwD0TTA8i2F4CKcKQcdBA/UKYx/CcMB7CKyHWuAHekcoEJdamiPoKSe/xc3YQSe7tAZQ55L9OSEMhuCgCYJXAKwJNhgBcACYBtBmatQYCvkbFIP+3GZYe4h+C9aih5oOW6bX7fxLBqrpDHnVcU4SmA5sWSk0bEUgGbgegPA6OoCXANrnEWSyFL9DIDgA4sYW2NoQIWUEhCKQLa0PBH5prKkJ1BgAkCdkWDJBkgkaR43sEQSLGA+EACJgG20iNNInQCbNEwQxgr6JcC4SxBaxAhlqsmMOOIIaJv6jlRCv0cW27wxV1Ajs5D/x/ntg8wyGMNoZozoK1tMEpELtGRKGArETWkHz2SGg37HWy3iNEkGL0M8RyInwBhpBAAAKhxpSIYCqSxAssGsEhJIBAv5L4CghZ/C+IlgLBB1FcAD9PwFyCeRK4FIGHNevNbhU42clxvGQ8nrUGaRaof1FMJoEWoHAghgTOQRAsiIG2jJ0iIDZCgNP9Sg4MaajfmV0COqcUGHbwtDchMFLqKsYHBnReBHcT2KshUo6FmKxEOMlj2yYUgPYMBbF94QIaBgRaFVlNE0NaJMJ8BQphBQgvRfDPcxYDx3RLjBYN2ewLsK0AI5av9YA6JKQGb4UoT8Vz4PiGRWxIAR6VghzkiAAjOJIiBfjjPExKulLhAZrBHIlQGUNTCfYMII09bjk+BQBVHat31GHEF6DwHaktVADK2T/EVxHcDnQezD+FgPYyFYrOAesCNc7/BsjZzj+iBjeiWsffASDhp0XgShAl7o4/9BPLl2KayleJDwPrCsFdENOCBv0cbhm4Xsg1swYHk22Au4rRVitgj4+QAB1RgBq+M6YP2sjSBL9MMDOgpji1RJ1huoCwtJErSzi0RPG8BiPpQZBPKxYYj2ujxduwUBViaCtLkNJEesE+KkX2ucS4+uKtUmF+q7WLrOc4lnyUzJp+bgi0AiAtgAN5TwZCF2xL1W8fwYax3gG7BSDxvE5wlrHsVHJZiFMrSJ0S6wBTgVhHnjdoNdPOUNoZbLDvwApXiT0QUJDi7iGsStmLs
ZzS3Orip81NgsQHAycRZgi2b4E4WR8rQqDAUFbl2TLaT5zvkUj+wf2IK7j7SfDyjKECiNwliDsCCckCKWK6CITXyv0bwrA1sRd1hq+JB1H0EuMBQlKhzYuIh1BwDSKm1OyaaDn2J7J3zDjWR13JQgOp3nhsE9jAC77NtLsBCEfL2yGzrm1FqFYDPT7jGJ8gC6K2OtJ+Qrwa34NxkWbCjBTyt/AmOD6AX1bEOCVwN1jERfh+BAYXWP9rzDIHXUuQttBFyCMj3V/4BAw8uQWHgKjYT0Iv0yosTq/JmKcE9mLP8L/5ndPxGo5xWmhHAWuhnMRtFWNj6rjfkC8iZ9B53lCc6CtnhGY0IN4rwLtgZAZsCc/KKr5L9BJsz+hk/40jyOIIy0EeAIOTieNlxCwFONjBHVn7BcJzmolHDsbqD05fsexHf+zdYm226A4Pp0zQoq/CzW7y3lIA3x0xRBHxSO/oBHEG4CFDsF9Ef/lcg6JwOSovQFBd0SAM6xzin0Q4OwgOJLi4ohyJWBXUGOGmCNApBZcC9qemLUT+FzG1pE2rGG6Ks+bivFvNeizQn9iuXRfFGcqlP9CzJ7GdgBi9yfbcNDDGF9RTBsyGDwk3Qe+QCUgqPcNEw4JW4UwXR/hyOBvPYyrSN+MZwwYRmCiTX8H9A/dM649uu6Y8EEYI1HsRrEYAiIRdEr2VyIdcKwBrw4i+0jjE3wTNDNBICHXxnMNfbKEPplh8mD/npyPywm5FrPGN9Ausb3PKd/lPDnWyinX2zkQFNfOvl/ruDb1KX/UJ7gr6vuS864Ul7NOgdjWYb/s1vGfwiCpAu32UarxhjnZIbIjpBulH2wXAnRDtJcImsQ8cYxxikd5ckJ+BaizdNKIEGP6VBsA/9uzdc5V16/f2EfMMzBQXvhciZ8lxPJ6DZpG/0PwTNCH9frjvCHkQBCyBfeVgU0h2zboUGxwyr5fa9tLuTMEpPIz0wlIH1KuiGytTr4Vcluko3+pxzEvEmaox0mLc9xJMF/WwwjGpbUVSpzL5PWE8Q35EYDLm4y6hBgBY2ObQKFHyg8D4o7yrJjnqch2hAiBJQBuInEuE1CFBCzH+BFzHARxxr8Pcw51g85IRS1G4KpNsDKMM2KEj0YVrtOC7WBVPy/2MQhmhVgNfTXmAOuYBtY55o45hrPRL9I8dwiEG+D6qziHoVOeg2sZJuYNIdegM+BM4/hD4VhfofgaYjP0sRLl7KHGIsbAAj8A3wW1HoCYMuAUbWDCgHWMHTMGq9KaIxyZjnhIjJtxLmZuxc8Ic/85xEXgRyXMi4HeRP3gEsaUYMYlYvWEjXTJxhasvcj+HmucqMOxJ8GmcT2Sf6hobZHOohyqpJCNI9g9/Hu9FlivVqzHKE+Btv9EKMyS8thjAq9TziPgnAfVmEqst+CawFrUN66UcnIhaXKaA5R7QXQq6x/OhYyptsFxnNCMpDMZhm7TOGMMEdc6B75Xp3gZczSstU+E/cTaIdSgfLG+Q7LpWA9D/SOTHk7Ec6cx4XpSTveBEOQX+mh6pXyX5df40IJiQbBZIWmDI9oo7fuzULcsKc+M6wm1dMR1GNSiCudAKvqcjfE8xCGcZyGAu3WifDBoVtXNKRfFqEQAM6dwvx5BotOY56dNudMjIVJdfA4QL0Md0y8YCC+uC9aVTWNOa10jfTgtqB6EvoDrQpwDtTgHGnznTwvI73jVd83g6aEPwHywhHlJXNtklzFv+u2zwe/FNM+ofsW53rzgnBPnevOiRqFSrjdn3wM1I8w/Kpyv4locXC/mkGluoa8KJcp7+xQ7kWbBdYLPFH2MhL7TI7B8gWOPY4Q1tXyEgEf0m6gbMHdZ5pwblzg3jjWBGlZeoi7FvwF5K4iPPVg/2YhiYbLrqFlsGiOcxzDnHZXsj0MaM4CaCv6cM8pVQ7+BGs2mugV+D9QjQrbDmFPS+HkTKlboDA/tI+ZZS871SzRPMS8PedOCdCI8L6otUi4f57ZGdTPnux7h4fvEOsP4CHM7knslnCKtn5xqLhbVXFx8bpgnkFmHFIwULdjH6jQeCP1WsI6F+WsX43PSOJgfgOcE+GcF6skuwuIhbmUcM81ZxNd67C/+FLf+Hp+a4nxE/QI2FPYY+FyHtal+jzl0ypP81Ftsqv9hDCURZBNqr1T7z+pcN+dRVMr3wrOi/BPnl6Tv3DT4jZI/j3lh9KMy5zt47wP4JBh70gxj9OkO5WggNgJ8MYJDfdJnJWE1R7DPAPUK1lRKvn6K50LSiB7l/STvR3P9doygFq+B3Rd+F2vpEItijC3udWxybQ39o031V8pnFDUwleOCkjQXxvklaUC3jCm3V1I+KkEdAzkqALDymiooT8z5J/KZal33Zu2uwnUgbhliMLxvzEXxng4f14FL9r6gGq6D+TXPrPdLOFTXSw+c+0UIqkQQVKeMyjo2wTlKcwqfH+5LEeMkZgvFdKg9MIcEdg18DebDwNdCLf8PoNnz/Un5PqgPGbzPwyXdA3XyMqc8GuHOCU/K2OvvvLHFubCUYhCOHSWqVQF+2UHt5yJQPue9LAdxvU69xivOO2Oc7YKfodoX5vX+gGhdO8dOuul1K9gPFSkF+KbjqNJaca8rrc3OScTf3p9yAAvwRbBvx6XaH+ZSEsq1oP/EfLlE+iTnZ2AzulnME4yfUd8pqBOpLp6hjau6KeXSSWfi86CcnEbrDfWxRvpE2EasC0AeyubaGeS05x3wU2Os4fj6b2vrY8RTOzgnySdiDJSRJgg1rk8hxpxivxPlv8sfDLkbrFKsT+Lfj3SK61BDlqjvcJ2HGtlgN2O89ZPz5aylua4zo2e4ARsdgBbAsSiotg8xfUIajJDzT6rb4P4Lyt1QPovqPpgrBntnoB3jnNoT6/VUC2cbZnOtkfVx6lDsdqQ6H73HoLjV/IYHw3OWfoHMLX8JA4ZdW5Vr2xTx1ZHfT8W2wOwQKGdhvSJaERJXh6iyDSsZd5odDsJSQgb2W6XUGT+ooLsX3M0iYUYHPQ9mJgpcKaAULdppQZYq1+tMJmZkA8qK0O4Do46QaVZQJMaKnTLuXI0h2HydSTlClgJmGWaKqUIlrATtMvhR1JQ1ot0pnPmlyBg9QcgVFudFswq9v8RZOdzl4legCmyFZzFUz2SEG2MFN1do1x1cw5R34vjZ9yuN+5OtIO7EwXtg2LiLGY0Tg839emcNRM1KnQ11SSGzZwOrEmZcZYFdGIVfV29p91FdvVJcsralG3RSjORpRw+qW8g08u4fyWVLCCuXdrqgypA4WpXIQ7jkkfGZxBRFW+hJ6JmAGjUhYyBxBR0rTBVW9UCJmJRh9XA3Elh3h3ZAcSXZw0oaKUqsTILnDjD6UFzaxSChWqUKDmYJQfnS7onvV1RmterlzA3slIPMi4afwQplQqqO4OugiCWuYuscDeWcIS9YNVL1BHfDwQ4CeB9kH2CnAygtqFhshSdAa6xRBSNEBfXH3T9Yca066yD4TdZ1Yf9kcKukpO/iKp4F1h5+Bg80t8Ta0bHShRl+2CnkYoRZ7zQZA8QdoqhgLr7DhuxJS
VVOiCg4G4aRG1rhJ1tIrJzhNeP9w06MOutj6/T8I/YiCe6GAoU1rnd9UUaNM3jobRTaZWbgPIXntLHgel2KxMOk9KwVZ299rGhAJEAgcYmqkDiWYCnBCp8tF64LMymwTuD+Tzp9BzyTECtJ3/fa/UUWu/xTFvu36uyAkSavSXytM+lHjJwwS0c7WELavQVjQZVSiBQl3oVG0TNFXxJl9nzKbJU0F0GleuX3jgGJ52BJkVdIOyxSUNKIvdcoCnQJyI4VT5ezB5wNrKPaY/6TSUzJpuGzCmg3qfhelaKPuK6QV1CxAtuCO0dRrRq4+wTHxMJqHWTpcQ382yj63+uyC+5jlag2CisH80ka1eVc2GODHgRXPOwjg5iTcsVP9K+oa3EkKs6VlqQvUb9ruG/Bwr0HpXvF3CTl1GhvLuUH0wNohoJq17jHja7lW/MlEEtkVIPivUOUu5brfRMYm5QYW5esn9C/13tOQBO6mAMQKzJdVRTDofatfbjwIkIfQp0I9tjhTCctiHFDCrUwXNWgy8A7Qjys/Nuj/kd1Kb4Prhb8Hdx1QrtlcARQLckUpbio6ihqgkriiTL4UCkMHN5J5+DuC8h+jUnla2TzMPKWxF0V7m/vBis+tsxVGtwJgv4R/gbeISri/2k+wXmCS4wdMP5vDsw0WtI/Gq3fHZlRleY/tMafj1G2/tFQf3FwRvlH+//o6Iz2L3s2/2VNme+P42UNt/hpyvxXNML/O1ActH/ZlPkvm53Zc33dbj6z8z80O//Y7F77qzEN2q+6LH/OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHH7OHP6bp7m0v/GZQ/Vfner6JWJOkprKpvl7xNzfEfJ4v2XP5LHL/ut23z3Wz9vjw6X7Dx1g/P0RMbWGkv5mssvqf/SMmPaZ6J+J/r9/nlz7z010QLrf4Kl//673WN8P7m27g3f8Nw== \ No newline at end of file diff --git a/docs/static/favicon.ico b/docs/static/favicon.ico deleted file mode 100644 index b30f559497..0000000000 Binary files a/docs/static/favicon.ico and /dev/null differ diff --git a/docs/static/favicon.png b/docs/static/favicon.png deleted file mode 100644 index 66ce2072e9..0000000000 Binary files a/docs/static/favicon.png and /dev/null differ diff --git a/docs/static/images/namespace-multi.png b/docs/static/images/namespace-multi.png deleted file mode 100644 index 8bb0c3bb1a..0000000000 Binary files a/docs/static/images/namespace-multi.png and /dev/null differ diff --git a/docs/static/images/namespace-own.png b/docs/static/images/namespace-own.png deleted file mode 100644 index d1f9bde948..0000000000 Binary files a/docs/static/images/namespace-own.png and /dev/null differ diff --git a/docs/static/images/namespace-single.png b/docs/static/images/namespace-single.png deleted file mode 100644 index a32d628388..0000000000 Binary files a/docs/static/images/namespace-single.png and /dev/null differ diff --git a/docs/static/images/pgadmin4-login.png b/docs/static/images/pgadmin4-login.png deleted file mode 100644 index 298683c7b5..0000000000 Binary files a/docs/static/images/pgadmin4-login.png and /dev/null differ diff --git a/docs/static/images/pgadmin4-query.png b/docs/static/images/pgadmin4-query.png deleted file mode 100644 index 5c0d306016..0000000000 Binary files a/docs/static/images/pgadmin4-query.png and /dev/null differ diff --git a/docs/static/images/postgresql-cluster-architecture.png b/docs/static/images/postgresql-cluster-architecture.png deleted file mode 100644 index 8376397cb1..0000000000 Binary files a/docs/static/images/postgresql-cluster-architecture.png and /dev/null differ diff --git a/docs/static/images/postgresql-cluster-dr-base.png b/docs/static/images/postgresql-cluster-dr-base.png deleted file mode 100644 index 515e597500..0000000000 Binary files a/docs/static/images/postgresql-cluster-dr-base.png and /dev/null differ diff --git a/docs/static/images/postgresql-cluster-dr-schedule.png b/docs/static/images/postgresql-cluster-dr-schedule.png deleted file mode 100644 index 098c5e5658..0000000000 Binary files a/docs/static/images/postgresql-cluster-dr-schedule.png and /dev/null differ diff --git a/docs/static/images/postgresql-cluster-restore-step-1.png b/docs/static/images/postgresql-cluster-restore-step-1.png deleted file mode 100644 index 
d8d2439fbd..0000000000 Binary files a/docs/static/images/postgresql-cluster-restore-step-1.png and /dev/null differ diff --git a/docs/static/images/postgresql-cluster-restore-step-2.png b/docs/static/images/postgresql-cluster-restore-step-2.png deleted file mode 100644 index cf6c653d54..0000000000 Binary files a/docs/static/images/postgresql-cluster-restore-step-2.png and /dev/null differ diff --git a/docs/static/images/postgresql-ha-overview.png b/docs/static/images/postgresql-ha-overview.png deleted file mode 100644 index bb74de6739..0000000000 Binary files a/docs/static/images/postgresql-ha-overview.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-alerts.png b/docs/static/images/postgresql-monitoring-alerts.png deleted file mode 100644 index 13f49f3fe1..0000000000 Binary files a/docs/static/images/postgresql-monitoring-alerts.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-backups.png b/docs/static/images/postgresql-monitoring-backups.png deleted file mode 100644 index de5530f552..0000000000 Binary files a/docs/static/images/postgresql-monitoring-backups.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-cluster.png b/docs/static/images/postgresql-monitoring-cluster.png deleted file mode 100644 index ea83ce4270..0000000000 Binary files a/docs/static/images/postgresql-monitoring-cluster.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-overview.png b/docs/static/images/postgresql-monitoring-overview.png deleted file mode 100644 index 8d623aa0f8..0000000000 Binary files a/docs/static/images/postgresql-monitoring-overview.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-pod.png b/docs/static/images/postgresql-monitoring-pod.png deleted file mode 100644 index 30e8183f54..0000000000 Binary files a/docs/static/images/postgresql-monitoring-pod.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-query-topn.png b/docs/static/images/postgresql-monitoring-query-topn.png deleted file mode 100644 index a09c25c6c9..0000000000 Binary files a/docs/static/images/postgresql-monitoring-query-topn.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-query-total.png b/docs/static/images/postgresql-monitoring-query-total.png deleted file mode 100644 index 8c9485ebe5..0000000000 Binary files a/docs/static/images/postgresql-monitoring-query-total.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring-service.png b/docs/static/images/postgresql-monitoring-service.png deleted file mode 100644 index a24baf56a2..0000000000 Binary files a/docs/static/images/postgresql-monitoring-service.png and /dev/null differ diff --git a/docs/static/images/postgresql-monitoring.png b/docs/static/images/postgresql-monitoring.png deleted file mode 100644 index 96ed4017fc..0000000000 Binary files a/docs/static/images/postgresql-monitoring.png and /dev/null differ diff --git a/docs/static/images/repo-based-standby.png b/docs/static/images/repo-based-standby.png deleted file mode 100644 index 024cdd0d67..0000000000 Binary files a/docs/static/images/repo-based-standby.png and /dev/null differ diff --git a/docs/static/images/streaming-standby-external-repo.png b/docs/static/images/streaming-standby-external-repo.png deleted file mode 100644 index 086cceed42..0000000000 Binary files a/docs/static/images/streaming-standby-external-repo.png and /dev/null differ diff --git a/docs/static/images/streaming-standby.png 
b/docs/static/images/streaming-standby.png deleted file mode 100644 index 59be88e522..0000000000 Binary files a/docs/static/images/streaming-standby.png and /dev/null differ diff --git a/docs/static/operator-backrest-integration.png b/docs/static/operator-backrest-integration.png deleted file mode 100644 index 7d1f64b500..0000000000 Binary files a/docs/static/operator-backrest-integration.png and /dev/null differ diff --git a/docs/static/operator-backrest-integration.xml b/docs/static/operator-backrest-integration.xml deleted file mode 100644 index 7b27e5c83e..0000000000 --- a/docs/static/operator-backrest-integration.xml +++ /dev/null @@ -1 +0,0 @@ -7Vxbd6I6FP41PtrFXXysvc6szqqnnXU6c15mIURkioRCaGt//UkgQUKC2jG2Tqt9qNkJIdmXb18I9syT+fNF5qWzbzAAcc/QgueeedozDF1zNfyPUBYVZWBRQphFAR20JNxGL4BdSalFFICcG4ggjFGU8kQfJgnwEUfzsgw+8cOmMObvmnohEAi3vheL1LsoQDNK1Z3hsuMSROGM3to1BlXHxPPvwwwWCb1fzzCn5afqnntsLrrRfOYF8KlBMs965kkGIaq+zZ9PQEx4y9hWXXfe0VuvOwMJ2uQCo7rg0YsLuvXjzJ9FCDO1yDDlGHeOYY7CDIvD0K5TkHkIZvhrv2c43jztmaMQ32lE7hqS3eOBCDe+JAiEeGwEE7pTtGDcnUZxfAJjPA1pmtbpsX5u4zlylMF7wHoSmABCZBzSya0yL4jw1lpjpjBBVIl0C7e9OAoT3IjBlKwtTz0/SsKrsnVqa/SKxhKojMzRDM1jei+RlZS7jyBD4LlBoqy9AHAOULbAQ1gv0xdqBswKnpY65Zp01llDndg4j6pxWM+8FCX+QqUpl6ytuQLjQYC1nDZhhmYwhIkXny2po1J1AZlC47kRexMQj2rtbgsJeRk6JoaHaZMY+vffZ1HCOs6jmE0DkkA2DJMbg34DhBZUnl6BICYtV3sFYUrH8fpCNMlwrXOz7mGma7RURKIAw/LTUB0fSx3gzlHg5bOaIXiddBKLba7Rxpdkix9k6JHNmj/Zxp8j1OjCrZ90zgQ2BLBa83JYZD6V5a8ne2j51pd/ovHpt+LrjzNwPe7bOhU6XlkIENMEejkR/0q1zUCMLfaRB0GZEtJLxzBK0FLdTZtXd9O1+SmqVdGrWqpcL2Mj7f5+/t/Vv/Au9a+t88FLPrl4uUn7NkPVBpxxoET+QQxrbbvA5ox4dccjoxdvUg4gQkrJmstd2KOefSpTFAILEXYhx7RjHgVBaVMdptOEHVHitfkKaFN7Pbo8znPIUKivHelYwznRUNz/U4GzIXA6zcG2srSGtkRssPSOGCkqp1qkgtQE8w/KDzFu3sHUbOZ8CY8ROgd+pOVNchgXCGCHyICDUOuWwUSL/WNU+jmJMly1BtRKsVZ9JhAhOBe81yrsamIcWd0TceS3+HJCecKR2nK2EZ293IVap1eLk6qabrtHtuD3Bpro9lxze7dnDR1BmQTN4TCdt6aW22tp0sAmfytClZTMMX8OSVh8FPqpcRR7KSIea4SZijq5vLSBjfmsSbhqS7hqmtvZOkMRoyXWWqpKwaB2VqvktweW/9p4goaiik2NWRFzuPpQohT6UKIVjpIQU0wf5gs/LnIMaX3md/sZSKE6E8QhTjPG28QEJ1H4UACigU0jbGN3VjFnc+imIu30+6204xXALQsHNleORTc+WJYEH4ZK8IFHh/5SE9XCg24KqvQ2WU2TRnMV7cMnNDRpYQlMmbKUCYzapIW67LdIWrCwvEVjAA3suRpJS7NdvZ3Ft7Cz2qGQ1ogmoq2ZaNf5kS7GRlx+1BFo72N6xHBg+/QIZ0euafGxzXaIqBjxRKGRApwKGWEoyhsBzmq/VkWx1Kl9LzGrb70CiBTEO26rnmaaA8HFGZJgR1cQ7KyGrpXRqiwmCWCBRXNTFq9F3N82nlXA63btsiO2NF1JbOnujN0DCbudGFG1K42fMct5KGBVmTa18tMkOWXROg1hfwl/1Tx4YdVUDq1rv2Pgiv2xfx9kGIKyvzh2XWM5nzugXV3G/XiwUtdnWAxkOe8OKsOdgUq/rjgfwOVNwaWreH4Al75tmBJ9378KmwKwcVr1Md3WRbCRBYwqqmMdzLcE5o8vcPukqpq9KyL4MSyCX/lD/OHwwOjKFg94gJljC1p3OCjwJnU1/jBAtW16hKBu7KjmxiziXQ4K2Hb7ucVmhbA/KN4ZA5e7lWG14siO4p0661pTc/PIeatH0E+LfCbY4R5W3lZjiJLTCoZr8kKzlGCstQNEzYPLCTq9Qde/Jrcv068Pg9u7cBl+qkHUxrEiS3N4XBhaQ0YYgyzCKyciL2fYFBylJv+nqDIQUWVV+r8FsmwZhomFpOrIyeGsyd901qR+tM2AwhVPWO7qpEmHYm1Qudm/4ydrjOSTn0npAPmhSpB/D7Q2OzzxllJxLN4ocVjZFou6B52rdiaB964ToAd830t8H7gtfHfeG9/Zcf2PgO+1lXxyfL98iu15MNYj1539Di9/38weHncWxEsS+9dh/xqo79iM5IjN6sKAuhrApgbXsXKlbrZLDI5lqMmjlEnn7Q9AyW1w4LRqNbbbFOprx+Mv1QoUG6usykIcvp8BD9XP3yYZe/Tms3p7u+MQGvydoYGpyZzTroKDDi2UHU/Z++BgjUV98uCg400zS6VXOjwzUfVypdlrnlTWDPlzEzKi7eY3ct+1U2566vrdnV0/NtEt/uyMoW2WVosvarawU3jere6hSIf5yN74azwUiZKp+MrIHj4MWQ0OH+7VTdxc/jRBNXz5+w/m2f8= \ No newline at end of file diff --git a/docs/static/operator-crd-architecture.png b/docs/static/operator-crd-architecture.png deleted file mode 100644 index da86fa51b2..0000000000 Binary files a/docs/static/operator-crd-architecture.png and /dev/null differ diff --git a/docs/static/operator-crd-architecture.xml b/docs/static/operator-crd-architecture.xml deleted file mode 100644 
index 0c57bd52f4..0000000000
--- a/docs/static/operator-crd-architecture.xml
+++ /dev/null
@@ -1 +0,0 @@
-[elided: one line of compressed draw.io diagram data]
\ No newline at end of file
diff --git a/docs/static/operator-diagram-cluster.png b/docs/static/operator-diagram-cluster.png
deleted file mode 100644
index 201a18a5ed..0000000000
Binary files a/docs/static/operator-diagram-cluster.png and /dev/null differ
diff --git a/docs/static/operator-diagram-database.png b/docs/static/operator-diagram-database.png
deleted file mode 100644
index 6cfb3959d0..0000000000
Binary files a/docs/static/operator-diagram-database.png and /dev/null differ
diff --git a/docs/static/operator-diagram.png b/docs/static/operator-diagram.png
deleted file mode 100644
index a37c738ffe..0000000000
Binary files a/docs/static/operator-diagram.png and /dev/null differ
diff --git a/docs/themes/crunchy-hugo-theme b/docs/themes/crunchy-hugo-theme
deleted file mode 160000
index cda8fd1e16..0000000000
--- a/docs/themes/crunchy-hugo-theme
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit cda8fd1e169ee0a62583b88685c4b55b340bbd1d
diff --git a/examples/postgrescluster/postgrescluster.yaml b/examples/postgrescluster/postgrescluster.yaml
index 58d3535741..75756af94e 100644
--- a/examples/postgrescluster/postgrescluster.yaml
+++ b/examples/postgrescluster/postgrescluster.yaml
@@ -3,8 +3,7 @@ kind: PostgresCluster
 metadata:
   name: hippo
 spec:
-  image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.3-2
-  postgresVersion: 15
+  postgresVersion: 16
   instances:
     - name: instance1
       dataVolumeClaimSpec:
@@ -15,7 +14,6 @@ spec:
             storage: 1Gi
   backups:
     pgbackrest:
-      image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.45-2
       repos:
       - name: repo1
         volume:
@@ -34,5 +32,4 @@ spec:
               requests:
                 storage: 1Gi
   proxy:
-    pgBouncer:
-      image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.19-2
+    pgBouncer: {}
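
For reference, the example manifest after this hunk reads roughly as below. This is a sketch reconstructed from the hunks above plus the unchanged context of the upstream example file; the apiVersion value and the volume-claim fields outside the hunks are assumptions based on that file rather than shown in this diff. With the image fields removed, image selection is expected to fall back to the operator's configured defaults (for example, RELATED_IMAGE_* settings on the operator Deployment):

apiVersion: postgres-operator.crunchydata.com/v1beta1   # assumed; not shown in the hunks above
kind: PostgresCluster
metadata:
  name: hippo
spec:
  postgresVersion: 16        # image: is omitted; the operator supplies its default postgres image
  instances:
    - name: instance1
      dataVolumeClaimSpec:   # claim fields assumed from the unchanged upstream example
        accessModes:
        - "ReadWriteOnce"
        resources:
          requests:
            storage: 1Gi
  backups:
    pgbackrest:              # image: is omitted here as well
      repos:
      - name: repo1
        volume:
          volumeClaimSpec:
            accessModes:
            - "ReadWriteOnce"
            resources:
              requests:
                storage: 1Gi
  proxy:
    pgBouncer: {}            # empty mapping keeps pgBouncer enabled with the default image

Leaving pgBouncer as an empty mapping keeps the proxy feature enabled while deferring the image choice to the operator, which is the same pattern the other removed image fields follow.
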
diff --git a/go.mod b/go.mod
index 26d0d1c55b..d268d66018 100644
--- a/go.mod
+++ b/go.mod
@@ -1,95 +1,96 @@
 module github.com/crunchydata/postgres-operator
 
-go 1.19
+go 1.22.0
 
 require (
-	github.com/evanphx/json-patch/v5 v5.6.0
-	github.com/go-logr/logr v1.2.2
-	github.com/golang-jwt/jwt/v5 v5.0.0
-	github.com/google/go-cmp v0.5.9
-	github.com/google/uuid v1.3.0
-	github.com/onsi/ginkgo/v2 v2.0.0
-	github.com/onsi/gomega v1.18.1
+	github.com/go-logr/logr v1.4.2
+	github.com/golang-jwt/jwt/v5 v5.2.1
+	github.com/google/go-cmp v0.6.0
+	github.com/google/uuid v1.6.0
+	github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0
+	github.com/onsi/ginkgo/v2 v2.17.2
+	github.com/onsi/gomega v1.33.1
+	github.com/pganalyze/pg_query_go/v5 v5.1.0
 	github.com/pkg/errors v0.9.1
-	github.com/sirupsen/logrus v1.8.1
+	github.com/sirupsen/logrus v1.9.3
 	github.com/xdg-go/stringprep v1.0.2
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.27.0
-	go.opentelemetry.io/otel v1.2.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.2.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0
+	go.opentelemetry.io/otel v1.27.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
 	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.2.0
-	go.opentelemetry.io/otel/sdk v1.2.0
-	go.opentelemetry.io/otel/trace v1.2.0
-	golang.org/x/crypto v0.11.0
-	golang.org/x/mod v0.8.0
+	go.opentelemetry.io/otel/sdk v1.27.0
+	go.opentelemetry.io/otel/trace v1.27.0
+	golang.org/x/crypto v0.27.0
 	gotest.tools/v3 v3.1.0
-	k8s.io/api v0.24.2
-	k8s.io/apimachinery v0.24.2
-	k8s.io/client-go v0.24.2
-	k8s.io/component-base v0.24.2
-	sigs.k8s.io/controller-runtime v0.12.3
-	sigs.k8s.io/yaml v1.3.0
+	k8s.io/api v0.30.2
+	k8s.io/apimachinery v0.30.2
+	k8s.io/client-go v0.30.2
+	k8s.io/component-base v0.30.2
+	sigs.k8s.io/controller-runtime v0.18.4
+	sigs.k8s.io/yaml v1.4.0
 )
 
 require (
-	cloud.google.com/go/compute v1.19.1 // indirect
-	cloud.google.com/go/compute/metadata v0.2.3 // indirect
-	github.com/PuerkitoBio/purell v1.1.1 // indirect
-	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cenkalti/backoff/v4 v4.1.1 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/blang/semver/v4 v4.0.0 // indirect
+	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emicklei/go-restful v2.16.0+incompatible // indirect
-	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
-	github.com/felixge/httpsnoop v1.0.2 // indirect
-	github.com/fsnotify/fsnotify v1.5.1 // indirect
-	github.com/go-openapi/jsonpointer v0.19.5 // indirect
-	github.com/go-openapi/jsonreference v0.19.5 // indirect
-	github.com/go-openapi/swag v0.19.14 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.1 // indirect
+	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
+	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/fsnotify/fsnotify v1.7.0 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonreference v0.21.0 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/google/gnostic v0.5.7-v3refs // indirect
-	github.com/google/gofuzz v1.1.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
-	github.com/imdario/mergo v0.3.12 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
+	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect
+	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/mailru/easyjson v0.7.6 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/prometheus/client_golang v1.12.2 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.32.1 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
+	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+	github.com/prometheus/client_golang v1.19.1 // indirect
+	github.com/prometheus/client_model v0.6.1 // indirect
+	github.com/prometheus/common v0.54.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	go.opentelemetry.io/otel/internal/metric v0.25.0 // indirect
-	go.opentelemetry.io/otel/metric v0.25.0 // indirect
-	go.opentelemetry.io/proto/otlp v0.10.0 // indirect
-	golang.org/x/net v0.12.0 // indirect
-	golang.org/x/oauth2 v0.7.0 // indirect
-	golang.org/x/sys v0.10.0 // indirect
-	golang.org/x/term v0.10.0 // indirect
-	golang.org/x/text v0.11.0 // indirect
-	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
-	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529 // indirect
-	google.golang.org/grpc v1.56.1 // indirect
-	google.golang.org/protobuf v1.31.0 // indirect
+	go.opentelemetry.io/otel/metric v1.27.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+	golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 // indirect
+	golang.org/x/net v0.29.0 // indirect
+	golang.org/x/oauth2 v0.21.0 // indirect
v0.21.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.22.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.66.2 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.24.2 // indirect - k8s.io/klog/v2 v2.60.1 // indirect - k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect - sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + k8s.io/apiextensions-apiserver v0.30.2 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a // indirect + k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 10a703f00a..aed2056f6f 100644 --- a/go.sum +++ b/go.sum @@ -1,1036 +1,251 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod 
h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= -cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template 
v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= -github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline 
v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod 
h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.16.0+incompatible h1:rgqiKNjTnFQA6kkhFe16D8epTksy9HQ1MyrbDXSdYhM= -github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= -github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/godbus/dbus/v5 
v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod 
h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz 
v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 h1:mjQG0Vakr2h246kEDR85U8y8ZhPgT3bguTCajRa/jaw= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= 
+github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/pganalyze/pg_query_go/v5 v5.1.0 h1:MlxQqHZnvA3cbRQYyIrjxEjzo560P6MyTgtlaf3pmXg= +github.com/pganalyze/pg_query_go/v5 v5.1.0/go.mod h1:FsglvxidZsVN+Ltw3Ai6nTgPVcK2BPukH3jCDEqc1Ug= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8=
+github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
-go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q=
-go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
-go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
-go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.27.0 h1:0BgiNWjN7rUWO9HdjF4L12r8OW86QkVQcYmCjnayJLo=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.27.0/go.mod h1:bdvm3YpMxWAgEfQhtTBaVR8ceXPRuRBSQrvOBnIlHxc=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel v1.2.0 h1:YOQDvxO1FayUcT9MIhJhgMyNO1WqoduiyvQHzGN0kUQ=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0 h1:xzbcGykysUh776gzD1LUPsNNHKWN0kQWDnJhn1ddUuk=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0/go.mod h1:14T5gr+Y6s2AgHPqBMgnGwp04csUjQmYXFWPeiBoq5s=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.2.0 h1:j/jXNzS6Dy0DFgO/oyCvin4H7vTQBg2Vdi6idIzWhCI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.2.0/go.mod h1:k5GnE4m4Jyy2DNh6UAzG6Nml51nuqQyszV7O1ksQAnE=
+go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
+go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.2.0 h1:OiYdrCq1Ctwnovp6EofSPwlp5aGy4LgKNbkg7PtEUw8=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.2.0/go.mod h1:DUFCmFkXr0VtAHl5Zq2JRx24G6ze5CAq8YfdD36RdX8=
-go.opentelemetry.io/otel/internal/metric v0.25.0 h1:w/7RXe16WdPylaIXDgcYM6t/q0K5lXgSdZOEbIEyliE=
-go.opentelemetry.io/otel/internal/metric v0.25.0/go.mod h1:Nhuw26QSX7d6n4duoqAFi5KOQR4AuzyMcl5eXOgwxtc=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/metric v0.25.0 h1:7cXOnCADUsR3+EOqxPaSKwhEuNu0gz/56dRN1hpIdKw=
-go.opentelemetry.io/otel/metric v0.25.0/go.mod h1:E884FSpQfnJOMMUaq+05IWlJ4rjZpk2s/F1Ju+TEEm8=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk v1.2.0 h1:wKN260u4DesJYhyjxDa7LRFkuhH7ncEVKU37LWcyNIo=
+go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
+go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
-go.opentelemetry.io/otel/trace v1.2.0 h1:Ys3iqbqZhcf28hHzrm5WAquMkDHNZTUkw7KHbuNjej0=
+go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
+go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v0.10.0 h1:n7brgtEbDvXEgGyKKo8SobKT1e9FewlDtXzkVP5djoE=
-go.opentelemetry.io/proto/otlp v0.10.0/go.mod h1:zG20xCK0szZ1xdokeSOwEcmlXu+x9kkdRe6N1DhKcfU=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
+go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
+go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
-golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
+golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 h1:LoYXNGAShUG3m/ehNk4iFctuhGX/+R1ZpfJ4/ia80JM=
+golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
-golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
-golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
-golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
-golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
+golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
-golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
-golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
-gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M=
-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY=
-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE=
-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529 h1:DEH99RbiLZhMxrpEJCZ0A+wdTe0EOgou/poSLx9vWf4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ=
-google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
+gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 h1:QW9+G6Fir4VcRXVH8x3LilNAb6cxBGLa6+GM4hRwexE=
+google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3/go.mod h1:kdrSS/OiLkPrNUpzD4aHgCq2rVuC/YRxok32HXZ4vRE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
+google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk=
gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI=
-k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
-k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k=
-k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ=
-k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM=
-k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
-k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI=
-k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA=
-k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
-k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
-k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU=
-k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM=
-k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
-k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
-k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
-k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
-k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
-sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio=
-sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
-sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
-sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
-sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
+k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
+k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
+k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE=
+k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw=
+k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
+k8s.io/apimachinery v0.30.2/go.mod
h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50= +k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs= +k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII= +k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a h1:zD1uj3Jf+mD4zmA7W+goE5TxDkI7OGJjBNBzq5fJtLA= +k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index d8998fcc83..7fc3d63c10 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,15 +1,3 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 diff --git a/hack/controller-generator.sh b/hack/controller-generator.sh deleted file mode 100755 index b3e75e7ee3..0000000000 --- a/hack/controller-generator.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2021 - 2023 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu - -# Find the Go install path. 
-[ "${GOBIN:-}" ] || GOBIN="$(go env GOBIN)" -[ "${GOBIN:-}" ] || GOBIN="$(go env GOPATH)/bin" - -# Find `controller-gen` on the current PATH or install it to the Go install path. -tool="$(command -v controller-gen || true)" -[ -n "$tool" ] || tool="$GOBIN/controller-gen" -[ -x "$tool" ] || go install 'sigs.k8s.io/controller-tools/cmd/controller-gen@v0.8.0' - -"$tool" "$@" diff --git a/hack/create-kubeconfig.sh b/hack/create-kubeconfig.sh index b160b0b6d3..3bebcd194e 100755 --- a/hack/create-kubeconfig.sh +++ b/hack/create-kubeconfig.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2021 - 2023 Crunchy Data Solutions, Inc. +# Copyright 2021 - 2024 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/hack/create-todo-patch.sh b/hack/create-todo-patch.sh deleted file mode 100755 index f49dbc168a..0000000000 --- a/hack/create-todo-patch.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2021 - 2023 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -directory=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -clusters_dir="${directory}/../build/crd/postgresclusters" -upgrades_dir="${directory}/../build/crd/pgupgrades" - -# Generate a Kustomize patch file for removing any TODOs we inherit from the Kubernetes API. -# Right now there is one TODO in our CRD. This script focuses on removing that specific TODO -# anywhere it is found in the CRD. - -# The TODO comes from the following: -# https://github.com/kubernetes/api/blob/25b7aa9e86de7bba38c35cbe56701d2c1ff207e9/core/v1/types.go#L5609 -# Additionally, the hope is that this step can be removed once the following issue is addressed -# in the kubebuilder controller-tools project: -# https://github.com/kubernetes-sigs/controller-tools/issues/649 - -echo "Generating Kustomize patch file for removing Kube API TODOs" - -# Get the description of the "name" field with the TODO from any place it is used in the CRD and -# store it in a variable. Then, create another variable with the TODO stripped out. -name_desc_with_todo=$( - python3 -m yq -r \ - .spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.customTLSSecret.properties.name.description \ - "${clusters_dir}/generated/postgres-operator.crunchydata.com_postgresclusters.yaml" -) -name_desc_without_todo=$(sed 's/ TODO.*//g' <<< "${name_desc_with_todo}") - -# Generate a JSON patch file to update the "name" description for all applicable paths in the CRD. -python3 -m yq -y --arg old "${name_desc_with_todo}" --arg new "${name_desc_without_todo}" ' - [{ op: "add", path: "/work", value: $new }] + - [paths(select(.
== $old)) | { op: "copy", from: "/work", path: "/\(map(tostring) | join("/"))" }] + - [{ op: "remove", path: "/work" }] -' \ - "${clusters_dir}/generated/postgres-operator.crunchydata.com_postgresclusters.yaml" > "${clusters_dir}/todos.yaml" - -python3 -m yq -y --arg old "${name_desc_with_todo}" --arg new "${name_desc_without_todo}" ' - [{ op: "add", path: "/work", value: $new }] + - [paths(select(. == $old)) | { op: "copy", from: "/work", path: "/\(map(tostring) | join("/"))" }] + - [{ op: "remove", path: "/work" }] -' \ - "${upgrades_dir}/generated/postgres-operator.crunchydata.com_pgupgrades.yaml" > "${upgrades_dir}/todos.yaml" diff --git a/hack/generate-rbac.sh b/hack/generate-rbac.sh deleted file mode 100755 index 223843f3e8..0000000000 --- a/hack/generate-rbac.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2021 - 2023 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu - -declare -r paths="$1" directory="$2" - -# Use `controller-gen` to parse Go markers. -( set -x -"${BASH_SOURCE[0]%/*}/controller-generator.sh" \ - rbac:roleName='generated' \ - paths="${paths}" \ - output:dir="${directory}" # ${directory}/role.yaml -) - -# NOTE(cbandy): `kustomize` v4.1 and `kubectl` v1.22 will be able to change the -# kind of a resource: https://pr.k8s.io/101120 -ruby -r 'set' -r 'yaml' -e ' -directory = ARGV[0] -roles = YAML.load_stream(IO.read(File.join(directory, "role.yaml"))) -operator = roles.shift - -abort "Expected the operator ClusterRole first!" unless operator and operator["kind"] == "ClusterRole" - -# The client used by the controller sets up a cache and an informer for any GVK -# that it GETs. That informer needs the "watch" permission. -# - https://github.com/kubernetes-sigs/controller-runtime/issues/1249 -# - https://github.com/kubernetes-sigs/controller-runtime/issues/1454 -# TODO(cbandy): Move this into an RBAC marker when it can be configured on the Manager. -operator["rules"].each do |rule| - verbs = rule["verbs"].to_set - rule["verbs"] = verbs.add("watch").sort if verbs.intersect? Set["get", "list"] -end - -# Combine the other parsed Roles into the ClusterRole. -rules = operator["rules"] + roles.flat_map { |role| role["rules"] } -rules = rules. - group_by { |rule| rule.slice("apiGroups", "resources") }. - map do |(group_resource, rules)| - verbs = rules.flat_map { |rule| rule["verbs"] }.to_set.sort - group_resource.merge("verbs" => verbs) - end -operator["rules"] = rules.sort_by { |rule| rule.to_a } - -# Combine resources that have the same verbs. -rules = operator["rules"]. - group_by { |rule| rule.slice("apiGroups", "verbs") }. 
- map do |(group_verb, rules)| - resources = rules.flat_map { |rule| rule["resources"] }.to_set.sort - rule = group_verb.merge("resources" => resources) - rule.slice("apiGroups", "resources", "verbs") # keep the keys in order - end -operator["rules"] = rules.sort_by { |rule| rule.to_a } - -operator["metadata"] = { "name" => "postgres-operator" } -IO.write(File.join(directory, "cluster", "role.yaml"), YAML.dump(operator)) - -operator["kind"] = "Role" -IO.write(File.join(directory, "namespace", "role.yaml"), YAML.dump(operator)) -' -- "${directory}" diff --git a/hack/update-pgmonitor-installer.sh b/hack/update-pgmonitor-installer.sh index 088bc2908b..148a4761c9 100755 --- a/hack/update-pgmonitor-installer.sh +++ b/hack/update-pgmonitor-installer.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2022 - 2023 Crunchy Data Solutions, Inc. +# Copyright 2022 - 2024 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/img/CrunchyDataPrimaryIcon.png b/img/CrunchyDataPrimaryIcon.png new file mode 100644 index 0000000000..e238a688dd Binary files /dev/null and b/img/CrunchyDataPrimaryIcon.png differ diff --git a/installers/olm/.gitignore b/installers/olm/.gitignore deleted file mode 100644 index a2d12b4ff2..0000000000 --- a/installers/olm/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/bundles/ -/projects/ -/tools/ -/config/marketplace diff --git a/installers/olm/Makefile b/installers/olm/Makefile deleted file mode 100644 index 3a6d7aac90..0000000000 --- a/installers/olm/Makefile +++ /dev/null @@ -1,112 +0,0 @@ -.DEFAULT_GOAL := help -.SUFFIXES: - -CONTAINER ?= docker -PGO_VERSION ?= latest -REPLACES_VERSION ?= 5.x.y - -OS_KERNEL ?= $(shell bash -c 'echo $${1,,}' - `uname -s`) -OS_MACHINE ?= $(shell bash -c 'echo $${1/x86_/amd}' - `uname -m`) -SYSTEM = $(OS_KERNEL)-$(OS_MACHINE) - -export PATH := $(CURDIR)/tools/$(SYSTEM):$(PATH) - -export PGO_VERSION - -export REPLACES_VERSION - -distros = community redhat marketplace - -.PHONY: bundles -bundles: ## Build OLM bundles -bundles: $(distros:%=bundles/%) - -# https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/#validating-your-bundle -# https://github.com/operator-framework/community-operators/blob/8a36a33/docs/packaging-required-criteria-ocp.md -.PHONY: bundles/community -bundles/community: - ./generate.sh community - env operator-sdk bundle validate $@ --select-optional='suite=operatorframework' - env operator-sdk bundle validate $@ --select-optional='name=community' --optional-values='index-path=$@/Dockerfile' - -# https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/reviewing-your-metadata-bundle -.PHONY: bundles/redhat -bundles/redhat: - ./generate.sh redhat - env operator-sdk bundle validate $@ --select-optional='suite=operatorframework' - -# The 'marketplace' configuration is currently identical to the 'redhat', so we just copy it here. 
-.PHONY: bundles/marketplace -bundles/marketplace: - cp -r ./config/redhat/ ./config/marketplace - ./generate.sh marketplace - env operator-sdk bundle validate $@ --select-optional='suite=operatorframework' - -.PHONY: clean -clean: clean-deprecated -clean: ## Remove generated files and downloaded tools - rm -rf ./bundles ./projects ./tools ./config/marketplace - -.PHONY: clean-deprecated -clean-deprecated: - rm -rf ./package - -.PHONY: help -help: ALIGN=18 -help: ## Print this message - @awk -F ': ## ' -- "/^[^':]+: ## /"' { printf "'$$(tput bold)'%-$(ALIGN)s'$$(tput sgr0)' %s\n", $$1, $$2 }' $(MAKEFILE_LIST) - -.PHONY: install-olm -install-olm: ## Install OLM in Kubernetes - env operator-sdk olm install - -.PHONY: tools -tools: ## Download tools needed to build bundles - -tools: tools/$(SYSTEM)/jq -tools/$(SYSTEM)/jq: - install -d '$(dir $@)' - curl -fSL -o '$@' "https://github.com/stedolan/jq/releases/download/jq-1.6/jq-$$(SYSTEM='$(SYSTEM)'; \ - case "$$SYSTEM" in \ - (linux-*) echo "$${SYSTEM/-amd/}";; (darwin-*) echo "$${SYSTEM/darwin/osx}";; (*) echo '$(SYSTEM)';; \ - esac)" - chmod u+x '$@' - -tools: tools/$(SYSTEM)/kubectl -tools/$(SYSTEM)/kubectl: - install -d '$(dir $@)' - curl -fSL -o '$@' 'https://dl.k8s.io/release/$(shell curl -Ls https://dl.k8s.io/release/stable-1.21.txt)/bin/$(OS_KERNEL)/$(OS_MACHINE)/kubectl' - chmod u+x '$@' - -# quay.io/operator-framework/operator-sdk -tools: tools/$(SYSTEM)/operator-sdk -tools/$(SYSTEM)/operator-sdk: - install -d '$(dir $@)' - curl -fSL -o '$@' 'https://github.com/operator-framework/operator-sdk/releases/download/v1.18.0/operator-sdk_$(OS_KERNEL)_$(OS_MACHINE)' - chmod u+x '$@' - -tools: tools/$(SYSTEM)/opm -tools/$(SYSTEM)/opm: - install -d '$(dir $@)' - curl -fSL -o '$@' 'https://github.com/operator-framework/operator-registry/releases/download/v1.20.0/$(OS_KERNEL)-$(OS_MACHINE)-opm' - chmod u+x '$@' - -tools/$(SYSTEM)/venv: - install -d '$(dir $@)' - python3 -m venv '$@' - -tools: tools/$(SYSTEM)/yq -tools/$(SYSTEM)/yq: | tools/$(SYSTEM)/venv - 'tools/$(SYSTEM)/venv/bin/python' -m pip install yq - cd '$(dir $@)' && ln -s venv/bin/yq - -.PHONY: validate-bundles -validate-bundles: ## Build temporary bundle images and run scorecard tests in Kubernetes -validate-bundles: $(distros:%=validate-%-image) -validate-bundles: $(distros:%=validate-%-directory) - -validate-%-directory: - ./validate-directory.sh 'bundles/$*' - -validate-%-image: - ./validate-image.sh '$(CONTAINER)' 'bundles/$*' diff --git a/installers/olm/README.md b/installers/olm/README.md deleted file mode 100644 index c36f918544..0000000000 --- a/installers/olm/README.md +++ /dev/null @@ -1,147 +0,0 @@ -This directory contains the files that are used to install [Crunchy PostgreSQL for Kubernetes][hub-listing], -which includes PGO, the Postgres Operator from [Crunchy Data][], using [Operator Lifecycle Manager][OLM]. - -The integration centers around a [ClusterServiceVersion][olm-csv] [manifest](./bundle.csv.yaml) -that gets packaged for OperatorHub. Changes there are accepted only if they pass all the [scorecard][] -tests. Consult the [technical requirements][hub-contrib] when making changes. 
- - [Crunchy Data]: https://www.crunchydata.com -[hub-contrib]: https://operator-framework.github.io/community-operators/packaging-operator/ -[hub-listing]: https://operatorhub.io/operator/postgresql -[OLM]: https://github.com/operator-framework/operator-lifecycle-manager -[olm-csv]: https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md -[scorecard]: https://sdk.operatorframework.io/docs/testing-operators/scorecard/ - -[Red Hat Container Certification]: https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/ -[Red Hat Operator Certification]: https://redhat-connect.gitbook.io/certified-operator-guide/ - - - -## Notes - -### v5 Versions per Repository - -Community: https://github.com/k8s-operatorhub/community-operators/tree/main/operators/postgresql - -5.0.2 -5.0.3 -5.0.4 -5.0.5 -5.1.0 - -Community Prod: https://github.com/redhat-openshift-ecosystem/community-operators-prod/tree/main/operators/postgresql - -5.0.2 -5.0.3 -5.0.4 -5.0.5 -5.1.0 - -Certified: https://github.com/redhat-openshift-ecosystem/certified-operators/tree/main/operators/crunchy-postgres-operator - -5.0.4 -5.0.5 -5.1.0 - -Marketplace: https://github.com/redhat-openshift-ecosystem/redhat-marketplace-operators/tree/main/operators/crunchy-postgres-operator-rhmp - -5.0.4 -5.0.5 -5.1.0 - -### Issues Encountered - -We hit various issues with 5.1.0 where the 'replaces' name, set in the clusterserviceversion.yaml, didn't match the -expected names found for all indexes. Previously, we set the 'com.redhat.openshift.versions' annotation to "v4.6-v4.9". -The goal for this setting was to limit the upper bound of supported versions for a particular PGO release. -The problem with this was that, at the time of the 5.1.0 release, OCP 4.10 had just been released. This meant that the -5.0.5 bundle did not exist in the OCP 4.10 index. The solution presented by Red Hat was to use the 'skips' clause for -the 5.1.0 release to remedy the immediate problem, but then go back to using an unbounded setting for subsequent -releases. - -For the certified, marketplace and community repositories, this strategy of using 'skips' instead of 'replaces' worked as -expected. However, for the production community operator bundle, we saw a failure that required adding an -additional 'replaces' value of 5.0.4 in addition to the 5.0.5 'skips' value. While this allowed the PR to merge, it -seems at odds with the behavior at the other repos. - -For more information on the use of 'skips' and 'replaces', please see: -https://olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/ -A minimal sketch of the two fields appears at the end of these notes. - -Another version issue encountered was related to our attempt to both support OCP v4.6 (which is an Extended Update -Support (EUS) release) while also limiting Kubernetes to 1.20+. The issue with this is that OCP 4.6 uses k8s 1.19, -and the kube minversion validation was in fact limiting the OCP version as well. Our hope was that those settings would -be treated independently, but that was unfortunately not the case. The fix was to lower the minimum Kubernetes version to -1.19, despite its having been released in the 3rd quarter of 2020 with 1 year of patch support. - -Following the lessons learned above, when bumping the OpenShift supported version from v4.6 to v4.8, we will similarly -keep the matching minimum Kubernetes version, i.e. 1.21. -https://access.redhat.com/solutions/4870701
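As a reference for the notes above, here is a minimal sketch of how 'replaces' and 'skips' relate inside a ClusterServiceVersion. This fragment is illustrative only: the CSV name format follows the `generate.sh` convention in this directory, but the version numbers are placeholders, not the actual published update graph.

```yaml
apiVersion: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
metadata:
  name: postgresoperator.v5.1.0
spec:
  version: 5.1.0
  # 'replaces' names one concrete predecessor CSV; OLM upgrades from it
  # directly, so that CSV must exist in every index this bundle ships to.
  replaces: postgresoperator.v5.0.4
  # 'skips' lists CSVs the update graph may jump over; useful when a
  # predecessor bundle is missing from a given index.
  skips:
    - postgresoperator.v5.0.5
```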
- -## Testing - -### Setup - -```sh -make tools -``` - -### Testing - -```sh -make bundles validate-bundles -``` - -Previously, the 'validate_bundle_image' function in validate-bundles.sh ended -with the following command: - -```sh - # Create an index database from the bundle image. - "${opm[@]}" index add --bundles="${image}" --generate - - # drwxr-xr-x. 2 user user 22 database - # -rw-r--r--. 1 user user 286720 database/index.db - # -rw-r--r--. 1 user user 267 index.Dockerfile -``` - -This command was used to generate the updated registry database, but this step -is no longer required when validating the OLM bundles. -- https://github.com/operator-framework/operator-registry/blob/master/docs/design/opm-tooling.md#add-1 - -```sh -BUNDLE_DIRECTORY='bundles/community' -BUNDLE_IMAGE='gcr.io/.../postgres-operator-bundle:latest' -INDEX_IMAGE='gcr.io/.../postgres-operator-bundle-index:latest' -NAMESPACE='pgo' - -docker build --tag "$BUNDLE_IMAGE" "$BUNDLE_DIRECTORY" -docker push "$BUNDLE_IMAGE" - -opm index add --bundles "$BUNDLE_IMAGE" --tag "$INDEX_IMAGE" --container-tool=docker -docker push "$INDEX_IMAGE" - -./install.sh operator "$BUNDLE_DIRECTORY" "$INDEX_IMAGE" "$NAMESPACE" "$NAMESPACE" - -# Cleanup -operator-sdk cleanup postgresql --namespace="$NAMESPACE" -kubectl -n "$NAMESPACE" delete operatorgroup olm-operator-group -``` - -### Post Bundle Generation - -After generating and testing the OLM bundles, there are two manual steps: - -1. Update the image SHA values (denoted with '', required for both the Red Hat 'Certified' and -'Marketplace' bundles) -2. Update the 'description.md' file to indicate which OCP versions this release of PGO was tested against. - -### Troubleshooting - -If, when running `make validate-bundles`, you encounter an error similar to - -`cannot find Containerfile or Dockerfile in context directory: stat /mnt/Dockerfile: permission denied` - -the target command is likely being blocked by SELinux and you will need to adjust -your settings accordingly. diff --git a/installers/olm/bundle.Dockerfile b/installers/olm/bundle.Dockerfile deleted file mode 100644 index a81d16f73e..0000000000 --- a/installers/olm/bundle.Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# Used to build the bundle image. This file is ignored by the community operator -# registries which work with bundle directories instead. -# https://operator-framework.github.io/community-operators/packaging-operator/ - -FROM scratch AS builder - -COPY manifests/ /build/manifests/ -COPY metadata/ /build/metadata/ -COPY tests/ /build/tests - - -FROM scratch - -# ANNOTATIONS is replaced with bundle.annotations.yaml -LABEL \ - ${ANNOTATIONS} - -COPY --from=builder /build/ / diff --git a/installers/olm/bundle.annotations.yaml b/installers/olm/bundle.annotations.yaml deleted file mode 100644 index 27dce5aa07..0000000000 --- a/installers/olm/bundle.annotations.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -annotations: - # https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/ - # https://docs.openshift.com/container-platform/4.7/operators/understanding/olm-packaging-format.html - operators.operatorframework.io.bundle.mediatype.v1: registry+v1 - operators.operatorframework.io.bundle.manifests.v1: manifests/ - operators.operatorframework.io.bundle.metadata.v1: metadata/ - - operators.operatorframework.io.test.mediatype.v1: scorecard+v1 - operators.operatorframework.io.test.config.v1: tests/scorecard/ - -# "package.v1" is the name of the PackageManifest.
It also determines the URL - # of the details page at OperatorHub.io; "postgresql" here becomes: - # https://operatorhub.io/operator/postgresql - # - # A package consists of multiple bundles (versions) arranged into channels. - # https://olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/ - operators.operatorframework.io.bundle.package.v1: '' # generate.sh - - # "channels.v1" is the comma-separated list of channels from which this bundle - # can be installed. - # - # "channel.default.v1" is the default channel of the PackageManifest. It is - # the first channel presented, the first used to satisfy dependencies, and - # the one used by a Subscription that does not specify a channel. OLM uses - # the value from the bundle with the highest semantic version. - # - # https://olm.operatorframework.io/docs/best-practices/channel-naming/ - operators.operatorframework.io.bundle.channels.v1: v5 - operators.operatorframework.io.bundle.channel.default.v1: v5 - - # OpenShift v4.9 is the lowest version supported for v5.3.0+. - # https://github.com/operator-framework/community-operators/blob/8a36a33/docs/packaging-required-criteria-ocp.md - # https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/bundle-directory - com.redhat.delivery.operator.bundle: true - com.redhat.openshift.versions: 'v4.10' - -... diff --git a/installers/olm/bundle.csv.yaml b/installers/olm/bundle.csv.yaml deleted file mode 100644 index 600f8b1bc0..0000000000 --- a/installers/olm/bundle.csv.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# https://olm.operatorframework.io/docs/concepts/crds/clusterserviceversion/ -# https://docs.openshift.com/container-platform/4.7/operators/operator_sdk/osdk-generating-csvs.html -# https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/creating-the-csv -# https://pkg.go.dev/github.com/operator-framework/api@v0.10.1/pkg/operators/v1alpha1#ClusterServiceVersion - -apiVersion: operators.coreos.com/v1alpha1 -kind: ClusterServiceVersion -metadata: - name: '' # generate.sh - annotations: - support: crunchydata.com - olm.properties: '[]' - - # The following affect how the package is indexed at OperatorHub.io: - # https://operatorhub.io/?category=Database - # https://sdk.operatorframework.io/docs/advanced-topics/operator-capabilities/operator-capabilities/ - categories: Database - capabilities: Auto Pilot - description: Production Postgres Made Easy - - # The following appear on the details page at OperatorHub.io: - # https://operatorhub.io/operator/postgresql - createdAt: 2019-12-31 19:40Z - repository: https://github.com/CrunchyData/postgres-operator - containerImage: # kustomize config/operator - alm-examples: |- # kustomize config/examples - -spec: - # The following affect how the package is indexed at OperatorHub.io: - # https://operatorhub.io/ - displayName: Crunchy Postgres for Kubernetes - provider: - # These values become labels on the PackageManifest. 
- name: Crunchy Data - url: https://www.crunchydata.com/ - keywords: - - postgres - - postgresql - - database - - sql - - operator - - crunchy data - - # The following appear on the details page at OperatorHub.io: - # https://operatorhub.io/operator/postgresql - description: |- # description.md - version: '' # generate.sh - links: - - name: Crunchy Data - url: https://www.crunchydata.com/ - - name: Documentation - url: https://access.crunchydata.com/documentation/postgres-operator/v5/ - maintainers: - - name: Crunchy Data - email: info@crunchydata.com - - # https://olm.operatorframework.io/docs/best-practices/common/ - # Note: The minKubeVersion must correspond to the lowest supported OCP version - minKubeVersion: 1.23.0 - maturity: stable - # https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.18.2/doc/design/how-to-update-operators.md#replaces--channels - replaces: '' # generate.sh - - # https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.18.2/doc/design/building-your-csv.md#your-custom-resource-definitions - customresourcedefinitions: - # The "displayName" and "description" fields appear in the "Custom Resource Definitions" section - # on the details page at OperatorHub.io: https://operatorhub.io/operator/postgresql - # - # The "specDescriptors" and "statusDescriptors" fields appear in the OpenShift Console: - # https://github.com/openshift/console/tree/a8b35e4/frontend/packages/operator-lifecycle-manager/src/components/descriptors - owned: # operator-sdk generate kustomize manifests - - # https://olm.operatorframework.io/docs/advanced-tasks/operator-scoping-with-operatorgroups/ - installModes: - - { type: OwnNamespace, supported: true } - - { type: SingleNamespace, supported: true } - - { type: MultiNamespace, supported: false } - - { type: AllNamespaces, supported: true } - - install: - strategy: deployment - spec: - permissions: # kustomize config/operator - deployments: # kustomize config/operator diff --git a/installers/olm/bundle.relatedImages.yaml b/installers/olm/bundle.relatedImages.yaml deleted file mode 100644 index 3824b27b2e..0000000000 --- a/installers/olm/bundle.relatedImages.yaml +++ /dev/null @@ -1,25 +0,0 @@ - relatedImages: - - name: PGADMIN - image: registry.connect.redhat.com/crunchydata/crunchy-pgadmin4@sha256: - - name: PGBACKREST - image: registry.connect.redhat.com/crunchydata/crunchy-pgbackrest@sha256: - - name: PGBOUNCER - image: registry.connect.redhat.com/crunchydata/crunchy-pgbouncer@sha256: - - name: PGEXPORTER - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-exporter@sha256: - - name: PGUPGRADE - image: registry.connect.redhat.com/crunchydata/crunchy-upgrade@sha256: - - name: POSTGRES_14 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256: - - name: POSTGRES_15 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256: - - name: POSTGRES_14_GIS_3.1 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: - - name: POSTGRES_14_GIS_3.2 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: - - name: POSTGRES_14_GIS_3.3 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: - - name: POSTGRES_15_GIS_3.3 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: - - name: postgres-operator - image: registry.connect.redhat.com/crunchydata/postgres-operator@sha256: diff --git a/installers/olm/config/community/kustomization.yaml b/installers/olm/config/community/kustomization.yaml 
deleted file mode 100644 index a34c7b4844..0000000000 --- a/installers/olm/config/community/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- ../operator -- ../examples diff --git a/installers/olm/config/examples/kustomization.yaml b/installers/olm/config/examples/kustomization.yaml deleted file mode 100644 index 420c2644f7..0000000000 --- a/installers/olm/config/examples/kustomization.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Custom resources that are imported into the ClusterServiceVersion. -# -# The first for each GVK appears in the "Custom Resource Definitions" section on -# the details page at OperatorHub.io: https://operatorhub.io/operator/postgresql -# -# The "metadata.name" fields should be unique so they can be given a description -# that is presented by compatible UIs. -# https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.18.2/doc/design/building-your-csv.md#crd-templates -# -# The "image" fields should be omitted so the defaults are used. -# https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/offline-enabled-operators - -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- postgrescluster.example.yaml -- pgadmin.example.yaml -- pgupgrade.example.yaml diff --git a/installers/olm/config/examples/pgupgrade.example.yaml b/installers/olm/config/examples/pgupgrade.example.yaml deleted file mode 100644 index ad4f45310a..0000000000 --- a/installers/olm/config/examples/pgupgrade.example.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: example-upgrade -spec: - postgresClusterName: example - fromPostgresVersion: 14 - toPostgresVersion: 15 diff --git a/installers/olm/config/operator/kustomization.yaml b/installers/olm/config/operator/kustomization.yaml deleted file mode 100644 index dfdce41618..0000000000 --- a/installers/olm/config/operator/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- ../../../../config/default - -patches: -- path: target-namespace.yaml diff --git a/installers/olm/config/operator/target-namespace.yaml b/installers/olm/config/operator/target-namespace.yaml deleted file mode 100644 index d7dbaadeef..0000000000 --- a/installers/olm/config/operator/target-namespace.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pgo -spec: - template: - spec: - containers: - - name: operator - env: - # https://docs.openshift.com/container-platform/4.7/operators/understanding/olm/olm-understanding-operatorgroups.html - - name: PGO_TARGET_NAMESPACE - valueFrom: { fieldRef: { fieldPath: "metadata.annotations['olm.targetNamespaces']" } } diff --git a/installers/olm/config/redhat/kustomization.yaml b/installers/olm/config/redhat/kustomization.yaml deleted file mode 100644 index ba0fce9a49..0000000000 --- a/installers/olm/config/redhat/kustomization.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- ../operator -- ../examples - -patches: -- path: related-images.yaml diff --git a/installers/olm/config/redhat/related-images.yaml b/installers/olm/config/redhat/related-images.yaml deleted file mode 100644 index ce0309b6bd..0000000000 --- a/installers/olm/config/redhat/related-images.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Red Hat Marketplace requires that bundles work 
offline. OSBS will fill out -# the "spec.relatedImages" field of the ClusterServiceVersion if it is blank. -# -# https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/offline-enabled-operators -# https://osbs.readthedocs.io/en/latest/users.html#pinning-pullspecs-for-related-images -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pgo -spec: - template: - spec: - containers: - - name: operator - image: registry.connect.redhat.com/crunchydata/postgres-operator@sha256: - env: - - { name: RELATED_IMAGE_PGADMIN, value: 'registry.connect.redhat.com/crunchydata/crunchy-pgadmin4@sha256:' } - - { name: RELATED_IMAGE_PGBACKREST, value: 'registry.connect.redhat.com/crunchydata/crunchy-pgbackrest@sha256:' } - - { name: RELATED_IMAGE_PGBOUNCER, value: 'registry.connect.redhat.com/crunchydata/crunchy-pgbouncer@sha256:' } - - { name: RELATED_IMAGE_PGEXPORTER, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-exporter@sha256:' } - - { name: RELATED_IMAGE_PGUPGRADE, value: 'registry.connect.redhat.com/crunchydata/crunchy-upgrade@sha256:' } - - - { name: RELATED_IMAGE_POSTGRES_14, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256:' } - - { name: RELATED_IMAGE_POSTGRES_15, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256:' } - - - { name: RELATED_IMAGE_POSTGRES_14_GIS_3.1, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:' } - - { name: RELATED_IMAGE_POSTGRES_14_GIS_3.2, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:' } - - { name: RELATED_IMAGE_POSTGRES_14_GIS_3.3, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:' } - - { name: RELATED_IMAGE_POSTGRES_15_GIS_3.3, value: 'registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:' } diff --git a/installers/olm/description.md b/installers/olm/description.md deleted file mode 100644 index f9fbfff771..0000000000 --- a/installers/olm/description.md +++ /dev/null @@ -1,73 +0,0 @@ -[PGO](https://github.com/CrunchyData/postgres-operator), the -[Postgres Operator](https://github.com/CrunchyData/postgres-operator) from -[Crunchy Data](https://www.crunchydata.com), gives you a **declarative Postgres** solution that -automatically manages your [PostgreSQL](https://www.postgresql.org) clusters. - -Designed for your GitOps workflows, it is [easy to get started](https://access.crunchydata.com/documentation/postgres-operator/v5/quickstart/) -with Postgres on Kubernetes with PGO. Within a few moments, you can have a production-grade Postgres -cluster complete with high availability, disaster recovery, and monitoring, all over secure TLS communications. -Even better, PGO lets you easily customize your Postgres cluster to tailor it to your workload! - -With conveniences ranging from cloning Postgres clusters to using rolling updates to roll out disruptive -changes with minimal downtime, PGO is ready to support your Postgres data at every stage of your -release pipeline. Built for resiliency and uptime, PGO will keep your Postgres cluster in its desired -state so you do not need to worry about it. - -PGO is developed with many years of production experience in automating Postgres management on -Kubernetes, providing a seamless cloud native Postgres solution to keep your data always available. - -- **PostgreSQL Cluster Provisioning**: [Create, Scale, & Delete PostgreSQL clusters with ease][provisioning], - while fully customizing your Pods and PostgreSQL configuration! -- **High-Availability**: Safe, automated failover backed by a [distributed consensus based high-availability solution][high-availability]. - Uses [Pod Anti-Affinity][k8s-anti-affinity] to improve resiliency; you can configure how aggressive this can be! - Failed primaries automatically heal, allowing for faster recovery time. You can even create regularly scheduled - backups and set your backup retention policy. -- **Disaster Recovery**: [Backups][backups] and [restores][disaster-recovery] leverage the open source [pgBackRest][] utility and - [include support for full, incremental, and differential backups as well as efficient delta restores][backups]. - Set how long you want your backups retained. Works great with very large databases! -- **Monitoring**: [Track the health of your PostgreSQL clusters][monitoring] using the open source [pgMonitor][] library. -- **Clone**: [Create new clusters from your existing clusters or backups][clone] with efficient data cloning. -- **TLS**: All connections are over [TLS][tls]. You can also [bring your own TLS infrastructure][tls] if you do not want to use the provided defaults. -- **Connection Pooling**: Advanced [connection pooling][pool] support using [pgBouncer][]. -- **Affinity and Tolerations**: Have your PostgreSQL clusters deployed to [Kubernetes Nodes][k8s-nodes] of your preference. - Set your [pod anti-affinity][k8s-anti-affinity], node affinity, Pod tolerations, and more rules to customize your deployment topology! -- **PostgreSQL Major Version Upgrades**: Perform a [PostgreSQL major version upgrade][major-version-upgrade] declaratively. -- **Database Administration**: Easily deploy [pgAdmin4][pgadmin] to administer your PostgresClusters' databases. - The automatic discovery of PostgresClusters ensures that you are able to seamlessly access any databases within your environment from the pgAdmin4 GUI. -- **Full Customizability**: Crunchy PostgreSQL for Kubernetes makes it easy to get your own PostgreSQL-as-a-Service up and running - and fully customize your deployments, including: - - Choose the resources for your Postgres cluster: [container resources and storage size][resize-cluster]. [Resize at any time][resize-cluster] with minimal disruption. - - Use your own container image repository, including support for `imagePullSecrets` and private repositories - - [Customize your PostgreSQL configuration][customize-cluster] - -and much more! (A minimal example cluster follows this list.)
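To make the feature list above concrete, here is a minimal, illustrative `PostgresCluster` manifest in the shape of the operator's `postgres-operator.crunchydata.com/v1beta1` API. The name `hippo`, the storage sizes, and the replica counts are placeholders; the quickstart manifests in the documentation linked above are the authoritative reference.

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo  # placeholder name
spec:
  postgresVersion: 15
  instances:
    - name: instance1
      replicas: 2  # two Postgres Pods for high availability
      dataVolumeClaimSpec:
        accessModes: ["ReadWriteOnce"]
        resources: { requests: { storage: 1Gi } }
  backups:
    pgbackrest:
      repos:
        - name: repo1  # pgBackRest repository backing backups and restores
          volume:
            volumeClaimSpec:
              accessModes: ["ReadWriteOnce"]
              resources: { requests: { storage: 1Gi } }
  proxy:
    pgBouncer:
      replicas: 1  # connection pooling in front of the cluster
```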
- -[backups]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/backups/ -[clone]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/disaster-recovery/#clone-a-postgres-cluster -[customize-cluster]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/customize-cluster/ -[disaster-recovery]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/disaster-recovery/ -[high-availability]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/high-availability/ -[major-version-upgrade]: https://access.crunchydata.com/documentation/postgres-operator/v5/guides/major-postgres-version-upgrade/ -[monitoring]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/monitoring/ -[pool]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/connection-pooling/ -[provisioning]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/create-cluster/ -[resize-cluster]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/resize-cluster/ -[tls]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/customize-cluster/#customize-tls - -[k8s-anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity -[k8s-nodes]: https://kubernetes.io/docs/concepts/architecture/nodes/ - -[pgAdmin]: https://www.pgadmin.org/ -[pgBackRest]: https://www.pgbackrest.org -[pgBouncer]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial/connection-pooling/ -[pgMonitor]: https://github.com/CrunchyData/pgmonitor - - -## Post-Installation - -### Tutorial - -Want to [learn more about the PostgreSQL Operator][tutorial]? Browse through the [tutorial][] to learn more about what you can do! - -[tutorial]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorial - diff --git a/installers/olm/generate.sh b/installers/olm/generate.sh deleted file mode 100755 index 8814bd4c75..0000000000 --- a/installers/olm/generate.sh +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2016 -# vim: set noexpandtab : -set -eu - -DISTRIBUTION="$1" - -cd "${BASH_SOURCE[0]%/*}" - -bundle_directory="bundles/${DISTRIBUTION}" -project_directory="projects/${DISTRIBUTION}" -go_api_directory=$(cd ../../pkg/apis && pwd) - -# The 'operators.operatorframework.io.bundle.package.v1' package name for each -# bundle (updated for the 'certified' and 'marketplace' bundles). -package_name='postgresql' - -# The project name used by operator-sdk for initial bundle generation. -project_name='postgresoperator' - -# The prefix for the 'clusterserviceversion.yaml' file. -# Per OLM guidance, the filename for the clusterserviceversion.yaml must be prefixed -# with the Operator's package name for the 'redhat' and 'marketplace' bundles. 
-# https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/troubleshooting.md#get-supported-versions -file_name='postgresoperator' -case "${DISTRIBUTION}" in - # https://redhat-connect.gitbook.io/certified-operator-guide/appendix/what-if-ive-already-published-a-community-operator - 'redhat') - file_name='crunchy-postgres-operator' - package_name='crunchy-postgres-operator' - ;; - # https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/ci-pipeline.md#bundle-structure - 'marketplace') - file_name='crunchy-postgres-operator-rhmp' - package_name='crunchy-postgres-operator-rhmp' - ;; -esac - -operator_yamls=$(kubectl kustomize "config/${DISTRIBUTION}") -operator_crds=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "CustomResourceDefinition"))') -operator_deployments=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "Deployment"))') -operator_accounts=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "ServiceAccount"))') -operator_roles=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "ClusterRole"))') - -# Recreate the Operator SDK project. -[ ! -d "${project_directory}" ] || rm -r "${project_directory}" -install -d "${project_directory}" -( - cd "${project_directory}" - operator-sdk init --fetch-deps='false' --project-name=${project_name} - rm ./*.go go.* - - # Generate CRD descriptions from Go markers. - # https://sdk.operatorframework.io/docs/building-operators/golang/references/markers/ - crd_gvks=$(yq <<< "${operator_crds}" 'map({ - group: .spec.group, kind: .spec.names.kind, version: .spec.versions[].name - })') - yq --in-place --yaml-roundtrip --argjson resources "${crd_gvks}" \ - '.multigroup = true | .resources = $resources | .' ./PROJECT - - ln -s "${go_api_directory}" . - operator-sdk generate kustomize manifests --interactive='false' -) - -# Recreate the OLM bundle. -[ ! -d "${bundle_directory}" ] || rm -r "${bundle_directory}" -install -d \ - "${bundle_directory}/manifests" \ - "${bundle_directory}/metadata" \ - "${bundle_directory}/tests/scorecard" \ - -# `echo "${operator_yamls}" | operator-sdk generate bundle` includes the ServiceAccount which cannot -# be upgraded: https://github.com/operator-framework/operator-lifecycle-manager/issues/2193 - -# Include Operator SDK scorecard tests. -# https://sdk.operatorframework.io/docs/advanced-topics/scorecard/scorecard/ -kubectl kustomize "${project_directory}/config/scorecard" \ - > "${bundle_directory}/tests/scorecard/config.yaml" - -# Render bundle annotations and strip comments. -# Per Red Hat we should not include the org.opencontainers annotations in the -# 'redhat' & 'marketplace' annotations.yaml file, so only add them for 'community'. -# - https://coreos.slack.com/team/UP1LZCC1Y -if [ ${DISTRIBUTION} == 'community' ]; then -yq --yaml-roundtrip < bundle.annotations.yaml > "${bundle_directory}/metadata/annotations.yaml" \ - --arg package "${package_name}" \ -' - .annotations["operators.operatorframework.io.bundle.package.v1"] = $package | - .annotations["org.opencontainers.image.authors"] = "info@crunchydata.com" | - .annotations["org.opencontainers.image.url"] = "https://crunchydata.com" | - .annotations["org.opencontainers.image.vendor"] = "Crunchy Data" | -.' 
-else -yq --yaml-roundtrip < bundle.annotations.yaml > "${bundle_directory}/metadata/annotations.yaml" \ - --arg package "${package_name}" \ -' - .annotations["operators.operatorframework.io.bundle.package.v1"] = $package | -.' -fi - -# Copy annotations into Dockerfile LABELs. -labels=$(yq --raw-output < "${bundle_directory}/metadata/annotations.yaml" \ - '.annotations | to_entries | map(.key +"="+ (.value | tojson)) | join(" \\\n\t")') -ANNOTATIONS="${labels}" envsubst '$ANNOTATIONS' < bundle.Dockerfile > "${bundle_directory}/Dockerfile" - -# Include CRDs as manifests. -crd_names=$(yq --raw-output <<< "${operator_crds}" 'to_entries[] | [.key, .value.metadata.name] | @tsv') -while IFS=$'\t' read -r index name; do - yq --yaml-roundtrip <<< "${operator_crds}" ".[${index}]" > "${bundle_directory}/manifests/${name}.crd.yaml" -done <<< "${crd_names}" - - -abort() { echo >&2 "$@"; exit 1; } -dump() { yq --color-output; } - -yq > /dev/null <<< "${operator_deployments}" --exit-status 'length == 1' || - abort "too many deployments!" $'\n'"$(dump <<< "${operator_deployments}")" - -yq > /dev/null <<< "${operator_accounts}" --exit-status 'length == 1' || - abort "too many service accounts!" $'\n'"$(dump <<< "${operator_accounts}")" - -yq > /dev/null <<< "${operator_roles}" --exit-status 'length == 1' || - abort "too many roles!" $'\n'"$(dump <<< "${operator_roles}")" - -# Render bundle CSV and strip comments. - -csv_stem=$(yq --raw-output '.projectName' "${project_directory}/PROJECT") - -crd_descriptions=$(yq '.spec.customresourcedefinitions.owned' \ -"${project_directory}/config/manifests/bases/${csv_stem}.clusterserviceversion.yaml") - -crd_gvks=$(yq <<< "${operator_crds}" 'map({ - group: .spec.group, kind: .spec.names.kind, version: .spec.versions[].name -} | { - apiVersion: "\(.group)/\(.version)", kind -})') -crd_examples=$(yq <<< "${operator_yamls}" --slurp --argjson gvks "${crd_gvks}" 'map(select( - IN({ apiVersion, kind }; $gvks | .[]) -))') - -yq --yaml-roundtrip < bundle.csv.yaml > "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" \ - --argjson deployment "$(yq <<< "${operator_deployments}" 'first')" \ - --argjson account "$(yq <<< "${operator_accounts}" 'first | .metadata.name')" \ - --argjson rules "$(yq <<< "${operator_roles}" 'first | .rules')" \ - --argjson crds "${crd_descriptions}" \ - --arg examples "${crd_examples}" \ - --arg version "${PGO_VERSION}" \ - --arg replaces "${REPLACES_VERSION}" \ - --arg description "$(< description.md)" \ - --arg icon "$(base64 ../seal.svg | tr -d '\n')" \ - --arg stem "${csv_stem}" \ -' - .metadata.annotations["alm-examples"] = $examples | - .metadata.annotations["containerImage"] = ($deployment.spec.template.spec.containers[0].image) | - - .metadata.name = "\($stem).v\($version)" | - .spec.version = $version | - .spec.replaces = "\($stem).v\($replaces)" | - - .spec.customresourcedefinitions.owned = $crds | - .spec.description = $description | - .spec.icon = [{ mediatype: "image/svg+xml", base64data: $icon }] | - - .spec.install.spec.permissions = [{ serviceAccountName: $account, rules: $rules }] | - .spec.install.spec.deployments = [( $deployment | { name: .metadata.name, spec } )] | -.' 
- -case "${DISTRIBUTION}" in - 'redhat') - # https://redhat-connect.gitbook.io/certified-operator-guide/appendix/what-if-ive-already-published-a-community-operator - yq --in-place --yaml-roundtrip \ - ' - .metadata.annotations.certified = "true" | - .metadata.annotations["containerImage"] = "registry.connect.redhat.com/crunchydata/postgres-operator@sha256:" | - .metadata.annotations["containerImage"] = "registry.connect.redhat.com/crunchydata/postgres-operator@sha256:" | - .' \ - "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" - - # Finally, add related images. NOTE: SHA values will need to be updated - # -https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/troubleshooting.md#digest-pinning - cat bundle.relatedImages.yaml >> "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" - ;; - 'marketplace') - # Annotations needed when targeting Red Hat Marketplace - # https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/ci-pipeline.md#bundle-structure - yq --in-place --yaml-roundtrip \ - --arg package_url "https://marketplace.redhat.com/en-us/operators/${file_name}" \ - ' - .metadata.annotations["containerImage"] = "registry.connect.redhat.com/crunchydata/postgres-operator@sha256:" | - .metadata.annotations["marketplace.openshift.io/remote-workflow"] = - "\($package_url)/pricing?utm_source=openshift_console" | - .metadata.annotations["marketplace.openshift.io/support-workflow"] = - "\($package_url)/support?utm_source=openshift_console" | - .' \ - "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" - - # Finally, add related images. NOTE: SHA values will need to be updated - # -https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/troubleshooting.md#digest-pinning - cat bundle.relatedImages.yaml >> "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" - ;; -esac - -if > /dev/null command -v tree; then tree -C "${bundle_directory}"; fi diff --git a/installers/olm/install.sh b/installers/olm/install.sh deleted file mode 100755 index 2c4f6ce190..0000000000 --- a/installers/olm/install.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -if command -v oc >/dev/null; then - kubectl() { oc "$@"; } - kubectl version -else - kubectl version --short -fi - -catalog_source() ( - source_namespace="$1" - source_name="$2" - index_image="$3" - - kc() { kubectl --namespace="$source_namespace" "$@"; } - kc get namespace "$source_namespace" --output=jsonpath='{""}' 2>/dev/null || - kc create namespace "$source_namespace" - - # See https://godoc.org/github.com/operator-framework/api/pkg/operators/v1alpha1#CatalogSource - source_json=$(jq --null-input \ - --arg name "${source_name}" \ - --arg image "${index_image}" \ - '{ - apiVersion: "operators.coreos.com/v1alpha1", kind: "CatalogSource", - metadata: { name: $name }, - spec: { - displayName: "Test Registry", - sourceType: "grpc", image: $image - } - }') - kc create --filename=- <<< "$source_json" - - # Wait for Pod to exist and be healthy. - for _ in $(seq 10); do - [ '[]' != "$( kc get pod --selector="olm.catalogSource=${source_name}" --output=jsonpath='{.items}' )" ] && - break || sleep 1s - done - if ! 
kc wait --for='condition=ready' --timeout='30s' pod --selector="olm.catalogSource=${source_name}"; then - kc logs --previous --tail='-1' --selector="olm.catalogSource=${source_name}" - fi -) - -operator_group() ( - group_namespace="$1" - group_name="$2" - target_namespaces=("${@:3}") - - kc() { kubectl --namespace="$group_namespace" "$@"; } - kc get namespace "$group_namespace" --output=jsonpath='{""}' 2>/dev/null || - kc create namespace "$group_namespace" - - group_json="$( jq <<< '{}' --arg name "$group_name" '{ - apiVersion: "operators.coreos.com/v1", kind: "OperatorGroup", - metadata: { "name": $name }, - spec: { targetNamespaces: [] } - }' )" - - for ns in "${target_namespaces[@]}"; do - group_json="$( jq <<< "$group_json" --arg namespace "$ns" '.spec.targetNamespaces += [ $namespace ]' )" - done - - kc create --filename=- <<< "$group_json" -) - -operator() ( - bundle_directory="$1" index_image="$2" - operator_namespace="$3" - target_namespaces=("${@:4}") - - package_name=$(yq \ - --raw-output '.annotations["operators.operatorframework.io.bundle.package.v1"]' \ - "${bundle_directory}"/*/annotations.yaml) - channel_name=$(yq \ - --raw-output '.annotations["operators.operatorframework.io.bundle.channels.v1"]' \ - "${bundle_directory}"/*/annotations.yaml) - csv_name=$(yq --raw-output '.metadata.name' \ - "${bundle_directory}"/*/*.clusterserviceversion.yaml) - - kc() { kubectl --namespace="$operator_namespace" "$@"; } - - catalog_source "$operator_namespace" olm-catalog-source "${index_image}" - operator_group "$operator_namespace" olm-operator-group "${target_namespaces[@]}" - - # Create a Subscription to install the operator. - # See https://godoc.org/github.com/operator-framework/api/pkg/operators/v1alpha1#Subscription - subscription_json=$(jq --null-input \ - --arg channel "$channel_name" \ - --arg namespace "$operator_namespace" \ - --arg package "$package_name" \ - --arg version "$csv_name" \ - '{ - apiVersion: "operators.coreos.com/v1alpha1", kind: "Subscription", - metadata: { name: $package }, - spec: { - name: $package, - sourceNamespace: $namespace, - source: "olm-catalog-source", - startingCSV: $version, - channel: $channel - } - }') - kc create --filename=- <<< "$subscription_json" - - # Wait for the InstallPlan to exist and be healthy. - for _ in $(seq 10); do - [ '[]' != "$( kc get installplan --output=jsonpath="{.items}" )" ] && - break || sleep 1s - done - if ! kc wait --for='condition=installed' --timeout='30s' installplan --all; then - subscription_uid="$( kc get subscription "$package_name" --output=jsonpath='{.metadata.uid}' )" - installplan_json="$( kc get installplan --output=json )" - - jq <<< "$installplan_json" --arg uid "$subscription_uid" \ - '.items[] | select(.metadata.ownerReferences[] | select(.uid == $uid)).status.conditions' - exit 1 - fi - - # Wait for Deployment to exist and be healthy. - for _ in $(seq 10); do - [ '[]' != "$( kc get deploy --selector="olm.owner=$csv_name" --output=jsonpath='{.items}' )" ] && - break || sleep 1s - done - if ! 
kc wait --for='condition=available' --timeout='30s' deploy --selector="olm.owner=$csv_name"; then - kc describe pod --selector="olm.owner=$csv_name" - - crashed_containers="$( kc get pod --selector="olm.owner=$csv_name" --output=json )" - crashed_containers="$( jq <<< "$crashed_containers" --raw-output \ - '.items[] | { - pod: .metadata.name, - container: .status.containerStatuses[] | select(.restartCount > 0).name - } | [.pod, .container] | @tsv' )" - - test -z "$crashed_containers" || while IFS=$'\t' read -r pod container; do - echo; echo "$pod/$container" restarted: - kc logs --container="$container" --previous --tail='-1' "pod/$pod" - done <<< "$crashed_containers" - - exit 1 - fi -) - -"$@" diff --git a/installers/olm/validate-directory.sh b/installers/olm/validate-directory.sh deleted file mode 100755 index 726f64946e..0000000000 --- a/installers/olm/validate-directory.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -if command -v oc > /dev/null; then - kubectl() { oc "$@"; } - kubectl version -else - kubectl version --short -fi - -push_trap_exit() { - local -a array - eval "array=($(trap -p EXIT))" - # shellcheck disable=SC2064 - trap "$1;${array[2]-}" EXIT -} - -validate_bundle_directory() { - local directory="$1" - local namespace - - namespace=$(kubectl create --filename=- --output='go-template={{.metadata.name}}' <<< '{ - "apiVersion": "v1", "kind": "Namespace", - "metadata": { - "generateName": "olm-test-", - "labels": { "olm-test": "bundle-directory" } - } - }') - echo 'namespace "'"${namespace}"'" created' - push_trap_exit "kubectl delete namespace '${namespace}'" - - # https://olm.operatorframework.io/docs/best-practices/common/ - # https://sdk.operatorframework.io/docs/advanced-topics/scorecard/scorecard/ - operator-sdk scorecard --namespace="${namespace}" "${directory}" -} - -validate_bundle_directory "$@" diff --git a/installers/olm/validate-image.sh b/installers/olm/validate-image.sh deleted file mode 100755 index 9d9adef6cf..0000000000 --- a/installers/olm/validate-image.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -push_trap_exit() { - local -a array - eval "array=($(trap -p EXIT))" - # shellcheck disable=SC2064 - trap "$1;${array[2]-}" EXIT -} - -# Store anything in a single temporary directory that gets cleaned up. -TMPDIR=$(mktemp -d) -push_trap_exit "rm -rf '${TMPDIR}'" -export TMPDIR - -validate_bundle_image() { - local container="$1" directory="$2" - directory=$(cd "${directory}" && pwd) - - cat > "${TMPDIR}/registry.config" <<-SSL - [req] - distinguished_name = req_distinguished_name - x509_extensions = v3_ext - prompt = no - [req_distinguished_name] - commonName = localhost - [v3_ext] - subjectAltName = @alt_names - [alt_names] - DNS.1 = localhost - SSL - - openssl ecparam -name prime256v1 -genkey -out "${TMPDIR}/registry.key" - openssl req -new -x509 -days 1 \ - -config "${TMPDIR}/registry.config" \ - -key "${TMPDIR}/registry.key" \ - -out "${TMPDIR}/registry.crt" - - # Start a local image registry. 
- local image port registry - registry=$(${container} run --detach --publish-all \ - --env='REGISTRY_HTTP_TLS_CERTIFICATE=/mnt/registry.crt' \ - --env='REGISTRY_HTTP_TLS_KEY=/mnt/registry.key' \ - --volume="${TMPDIR}:/mnt" \ - docker.io/library/registry:latest) - # https://github.com/containers/podman/issues/8524 - push_trap_exit "echo -n 'Removing '; ${container} rm '${registry}'" - push_trap_exit "echo -n 'Stopping '; ${container} stop '${registry}'" - - port=$(${container} inspect "${registry}" \ - --format='{{ (index .NetworkSettings.Ports "5000/tcp" 0).HostPort }}') - image="localhost:${port}/postgres-operator-bundle:latest" - - cat > "${TMPDIR}/registries.conf" <<-TOML - [[registry]] - location = "localhost:${port}" - insecure = true - TOML - - # Build the bundle image and push it to the local registry. - ${container} run --rm \ - --device='/dev/fuse:rw' --network='host' --security-opt='seccomp=unconfined' \ - --volume="${TMPDIR}/registries.conf:/etc/containers/registries.conf.d/localhost.conf:ro" \ - --volume="${directory}:/mnt:delegated" \ - --workdir='/mnt' \ - quay.io/buildah/stable:latest \ - buildah build-using-dockerfile \ - --format='docker' --layers --tag="docker://${image}" - - local -a opm - local opm_version - opm_version=$(opm version) - opm_version=$(sed -n 's#.*OpmVersion:"\([^"]*\)".*#\1# p' <<< "${opm_version}") - # shellcheck disable=SC2206 - opm=(${container} run --rm - --network='host' - --volume="${TMPDIR}/registry.crt:/usr/local/share/ca-certificates/registry.crt:ro" - --volume="${TMPDIR}:/mnt:delegated" - --workdir='/mnt' - quay.io/operator-framework/upstream-opm-builder:"${opm_version}" - sh -ceu 'update-ca-certificates && exec "$@"' - opm) - - # Validate the bundle image in the local registry. - # https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/#validating-your-bundle - "${opm[@]}" alpha bundle validate --image-builder='none' \ - --optional-validators='operatorhub,bundle-objects' \ - --tag="${image}" -} - -validate_bundle_image "$@" diff --git a/installers/seal.svg b/installers/seal.svg deleted file mode 100644 index 28e875f48f..0000000000 --- a/installers/seal.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/internal/bridge/client.go b/internal/bridge/client.go index 01d006dea9..d5ad8470f7 100644 --- a/internal/bridge/client.go +++ b/internal/bridge/client.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package bridge @@ -27,14 +16,30 @@ import ( "strconv" "time" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) const defaultAPI = "https://api.crunchybridge.com" var errAuthentication = errors.New("authentication failed") +type ClientInterface interface { + ListClusters(ctx context.Context, apiKey, teamId string) ([]*ClusterApiResource, error) + CreateCluster(ctx context.Context, apiKey string, clusterRequestPayload *PostClustersRequestPayload) (*ClusterApiResource, error) + DeleteCluster(ctx context.Context, apiKey, id string) (*ClusterApiResource, bool, error) + GetCluster(ctx context.Context, apiKey, id string) (*ClusterApiResource, error) + GetClusterStatus(ctx context.Context, apiKey, id string) (*ClusterStatusApiResource, error) + GetClusterUpgrade(ctx context.Context, apiKey, id string) (*ClusterUpgradeApiResource, error) + UpgradeCluster(ctx context.Context, apiKey, id string, clusterRequestPayload *PostClustersUpgradeRequestPayload) (*ClusterUpgradeApiResource, error) + UpgradeClusterHA(ctx context.Context, apiKey, id, action string) (*ClusterUpgradeApiResource, error) + UpdateCluster(ctx context.Context, apiKey, id string, clusterRequestPayload *PatchClustersRequestPayload) (*ClusterApiResource, error) + GetClusterRole(ctx context.Context, apiKey, clusterId, roleName string) (*ClusterRoleApiResource, error) +} + type Client struct { http.Client wait.Backoff @@ -43,6 +48,173 @@ type Client struct { Version string } +// BRIDGE API RESPONSE OBJECTS + +// ClusterApiResource is used to hold cluster information received in Bridge API response. 
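+//
+// The JSON tags mirror the Bridge API's snake_case field names, so a response
+// along these lines (a sketch with hypothetical values) unmarshals directly
+// into the struct, and any field the API omits keeps its zero value:
+//
+//	{"id": "abc123", "name": "hippo", "host": "p.example.com", "is_ha": true}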
+type ClusterApiResource struct {
+	ID                     string                       `json:"id,omitempty"`
+	ClusterGroup           *ClusterGroupApiResource     `json:"cluster_group,omitempty"`
+	PrimaryClusterID       string                       `json:"cluster_id,omitempty"`
+	CPU                    int64                        `json:"cpu,omitempty"`
+	CreatedAt              string                       `json:"created_at,omitempty"`
+	DiskUsage              *ClusterDiskUsageApiResource `json:"disk_usage,omitempty"`
+	Environment            string                       `json:"environment,omitempty"`
+	Host                   string                       `json:"host,omitempty"`
+	IsHA                   *bool                        `json:"is_ha,omitempty"`
+	IsProtected            *bool                        `json:"is_protected,omitempty"`
+	IsSuspended            *bool                        `json:"is_suspended,omitempty"`
+	Keychain               string                       `json:"keychain_id,omitempty"`
+	MaintenanceWindowStart int64                        `json:"maintenance_window_start,omitempty"`
+	MajorVersion           int                          `json:"major_version,omitempty"`
+	Memory                 float64                      `json:"memory,omitempty"`
+	ClusterName            string                       `json:"name,omitempty"`
+	Network                string                       `json:"network_id,omitempty"`
+	Parent                 string                       `json:"parent_id,omitempty"`
+	Plan                   string                       `json:"plan_id,omitempty"`
+	PostgresVersion        intstr.IntOrString           `json:"postgres_version_id,omitempty"`
+	Provider               string                       `json:"provider_id,omitempty"`
+	Region                 string                       `json:"region_id,omitempty"`
+	Replicas               []*ClusterApiResource        `json:"replicas,omitempty"`
+	Storage                int64                        `json:"storage,omitempty"`
+	Tailscale              *bool                        `json:"tailscale_active,omitempty"`
+	Team                   string                       `json:"team_id,omitempty"`
+	LastUpdate             string                       `json:"updated_at,omitempty"`
+	ResponsePayload        v1beta1.SchemalessObject     `json:""`
+}
+
+func (c *ClusterApiResource) AddDataToClusterStatus(cluster *v1beta1.CrunchyBridgeCluster) {
+	cluster.Status.ClusterName = c.ClusterName
+	cluster.Status.Host = c.Host
+	cluster.Status.ID = c.ID
+	cluster.Status.IsHA = c.IsHA
+	cluster.Status.IsProtected = c.IsProtected
+	cluster.Status.MajorVersion = c.MajorVersion
+	cluster.Status.Plan = c.Plan
+	cluster.Status.Storage = FromGibibytes(c.Storage)
+	cluster.Status.Responses.Cluster = c.ResponsePayload
+}
+
+type ClusterList struct {
+	Clusters []*ClusterApiResource `json:"clusters"`
+}
+
+// ClusterDiskUsageApiResource holds information on disk usage for a particular cluster.
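+// All figures are in megabytes; for illustration (hypothetical values), a
+// response with disk_total_size_mb: 10240 and disk_used_mb: 2048 describes a
+// volume of roughly 10 GiB that is about 20% used.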
+type ClusterDiskUsageApiResource struct { + DiskAvailableMB int64 `json:"disk_available_mb,omitempty"` + DiskTotalSizeMB int64 `json:"disk_total_size_mb,omitempty"` + DiskUsedMB int64 `json:"disk_used_mb,omitempty"` +} + +// ClusterGroupApiResource holds information on a ClusterGroup +type ClusterGroupApiResource struct { + ID string `json:"id,omitempty"` + Clusters []*ClusterApiResource `json:"clusters,omitempty"` + Kind string `json:"kind,omitempty"` + Name string `json:"name,omitempty"` + Network string `json:"network_id,omitempty"` + Provider string `json:"provider_id,omitempty"` + Region string `json:"region_id,omitempty"` + Team string `json:"team_id,omitempty"` +} + +type ClusterStatusApiResource struct { + DiskUsage *ClusterDiskUsageApiResource `json:"disk_usage,omitempty"` + OldestBackup string `json:"oldest_backup_at,omitempty"` + OngoingUpgrade *ClusterUpgradeApiResource `json:"ongoing_upgrade,omitempty"` + State string `json:"state,omitempty"` + ResponsePayload v1beta1.SchemalessObject `json:""` +} + +func (c *ClusterStatusApiResource) AddDataToClusterStatus(cluster *v1beta1.CrunchyBridgeCluster) { + cluster.Status.State = c.State + cluster.Status.Responses.Status = c.ResponsePayload +} + +type ClusterUpgradeApiResource struct { + ClusterID string `json:"cluster_id,omitempty"` + Operations []*v1beta1.UpgradeOperation `json:"operations,omitempty"` + Team string `json:"team_id,omitempty"` + ResponsePayload v1beta1.SchemalessObject `json:""` +} + +func (c *ClusterUpgradeApiResource) AddDataToClusterStatus(cluster *v1beta1.CrunchyBridgeCluster) { + cluster.Status.OngoingUpgrade = c.Operations + cluster.Status.Responses.Upgrade = c.ResponsePayload +} + +type ClusterUpgradeOperationApiResource struct { + Flavor string `json:"flavor,omitempty"` + StartingFrom string `json:"starting_from,omitempty"` + State string `json:"state,omitempty"` +} + +// ClusterRoleApiResource is used for retrieving details on ClusterRole from the Bridge API +type ClusterRoleApiResource struct { + AccountEmail string `json:"account_email"` + AccountId string `json:"account_id"` + ClusterId string `json:"cluster_id"` + Flavor string `json:"flavor"` + Name string `json:"name"` + Password string `json:"password"` + Team string `json:"team_id"` + URI string `json:"uri"` +} + +// ClusterRoleList holds a slice of ClusterRoleApiResource +type ClusterRoleList struct { + Roles []*ClusterRoleApiResource `json:"roles"` +} + +// BRIDGE API REQUEST PAYLOADS + +// PatchClustersRequestPayload is used for updating various properties of an existing cluster. +type PatchClustersRequestPayload struct { + ClusterGroup string `json:"cluster_group_id,omitempty"` + // DashboardSettings *ClusterDashboardSettings `json:"dashboard_settings,omitempty"` + // TODO (dsessler7): Find docs for DashboardSettings and create appropriate struct + Environment string `json:"environment,omitempty"` + IsProtected *bool `json:"is_protected,omitempty"` + MaintenanceWindowStart int64 `json:"maintenance_window_start,omitempty"` + Name string `json:"name,omitempty"` +} + +// PostClustersRequestPayload is used for creating a new cluster. 
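+// Name, Plan, and Team are the only fields without omitempty, so a minimal
+// request could look like this sketch (all values hypothetical):
+//
+//	payload := &PostClustersRequestPayload{
+//		Name: "hippo",
+//		Plan: "standard-8",
+//		Team: "1234",
+//	}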
+type PostClustersRequestPayload struct { + Name string `json:"name"` + Plan string `json:"plan_id"` + Team string `json:"team_id"` + ClusterGroup string `json:"cluster_group_id,omitempty"` + Environment string `json:"environment,omitempty"` + IsHA bool `json:"is_ha,omitempty"` + Keychain string `json:"keychain_id,omitempty"` + Network string `json:"network_id,omitempty"` + PostgresVersion intstr.IntOrString `json:"postgres_version_id,omitempty"` + Provider string `json:"provider_id,omitempty"` + Region string `json:"region_id,omitempty"` + Storage int64 `json:"storage,omitempty"` +} + +// PostClustersUpgradeRequestPayload is used for creating a new cluster upgrade which may include +// changing its plan, upgrading its major version, or increasing its storage size. +type PostClustersUpgradeRequestPayload struct { + Plan string `json:"plan_id,omitempty"` + PostgresVersion intstr.IntOrString `json:"postgres_version_id,omitempty"` + UpgradeStartTime string `json:"starting_from,omitempty"` + Storage int64 `json:"storage,omitempty"` +} + +// PutClustersUpgradeRequestPayload is used for updating an ongoing or scheduled upgrade. +// TODO: Implement the ability to update an upgrade (this isn't currently being used) +type PutClustersUpgradeRequestPayload struct { + Plan string `json:"plan_id,omitempty"` + PostgresVersion intstr.IntOrString `json:"postgres_version_id,omitempty"` + UpgradeStartTime string `json:"starting_from,omitempty"` + Storage int64 `json:"storage,omitempty"` + UseMaintenanceWindow *bool `json:"use_cluster_maintenance_window,omitempty"` +} + +// BRIDGE CLIENT FUNCTIONS AND METHODS + // NewClient creates a Client with backoff settings that amount to // ~10 attempts over ~2 minutes. A default is used when apiURL is not // an acceptable URL. @@ -75,7 +247,7 @@ func NewClient(apiURL, version string) *Client { // Be sure to close the [http.Response] Body when the returned error is nil. // See [http.Client.Do] for more details. func (c *Client) doWithBackoff( - ctx context.Context, method, path string, body []byte, headers http.Header, + ctx context.Context, method, path string, params url.Values, body []byte, headers http.Header, ) ( *http.Response, error, ) { @@ -94,11 +266,15 @@ func (c *Client) doWithBackoff( } headers.Set("User-Agent", "PGO/"+c.Version) - url := c.BaseURL.JoinPath(path).String() + url := c.BaseURL.JoinPath(path) + if params != nil { + url.RawQuery = params.Encode() + } + urlString := url.String() err := wait.ExponentialBackoff(c.Backoff, func() (bool, error) { // NOTE: The [net/http] package treats an empty [bytes.Reader] the same as nil. - request, err := http.NewRequestWithContext(ctx, method, url, bytes.NewReader(body)) + request, err := http.NewRequestWithContext(ctx, method, urlString, bytes.NewReader(body)) if err == nil { request.Header = headers.Clone() @@ -142,11 +318,11 @@ func (c *Client) doWithBackoff( // Be sure to close the [http.Response] Body when the returned error is nil. // See [http.Client.Do] for more details. func (c *Client) doWithRetry( - ctx context.Context, method, path string, body []byte, headers http.Header, + ctx context.Context, method, path string, params url.Values, body []byte, headers http.Header, ) ( *http.Response, error, ) { - response, err := c.doWithBackoff(ctx, method, path, body, headers) + response, err := c.doWithBackoff(ctx, method, path, params, body, headers) // Retry the request when the server responds with "Too many requests". 
// - https://docs.crunchybridge.com/api-concepts/getting-started/#status-codes @@ -170,7 +346,7 @@ func (c *Client) doWithRetry( select { case <-timer.C: // Try the request again. Check it in the loop condition. - response, err = c.doWithBackoff(ctx, method, path, body, headers) + response, err = c.doWithBackoff(ctx, method, path, params, body, headers) timer.Stop() case <-ctx.Done(): @@ -185,7 +361,7 @@ func (c *Client) doWithRetry( func (c *Client) CreateAuthObject(ctx context.Context, authn AuthObject) (AuthObject, error) { var result AuthObject - response, err := c.doWithRetry(ctx, "POST", "/vendor/operator/auth-objects", nil, http.Header{ + response, err := c.doWithRetry(ctx, "POST", "/vendor/operator/auth-objects", nil, nil, http.Header{ "Accept": []string{"application/json"}, "Authorization": []string{"Bearer " + authn.Secret}, }) @@ -217,7 +393,7 @@ func (c *Client) CreateAuthObject(ctx context.Context, authn AuthObject) (AuthOb func (c *Client) CreateInstallation(ctx context.Context) (Installation, error) { var result Installation - response, err := c.doWithRetry(ctx, "POST", "/vendor/operator/installations", nil, http.Header{ + response, err := c.doWithRetry(ctx, "POST", "/vendor/operator/installations", nil, nil, http.Header{ "Accept": []string{"application/json"}, }) @@ -240,3 +416,404 @@ func (c *Client) CreateInstallation(ctx context.Context) (Installation, error) { return result, err } + +// CRUNCHYBRIDGECLUSTER CRUD METHODS + +// ListClusters makes a GET request to the "/clusters" endpoint to retrieve a list of all clusters +// in Bridge that are owned by the team specified by the provided team id. +func (c *Client) ListClusters(ctx context.Context, apiKey, teamId string) ([]*ClusterApiResource, error) { + result := &ClusterList{} + + params := url.Values{} + if len(teamId) > 0 { + params.Add("team_id", teamId) + } + response, err := c.doWithRetry(ctx, "GET", "/clusters", params, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result.Clusters, err +} + +// CreateCluster makes a POST request to the "/clusters" endpoint thereby creating a cluster +// in Bridge with the settings specified in the request payload. 
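+// A typical call might look like this sketch (error handling elided; the API
+// key, team id, and payload values are hypothetical):
+//
+//	cluster, err := client.CreateCluster(ctx, apiKey, &PostClustersRequestPayload{
+//		Name: "hippo", Plan: "standard-8", Team: teamId,
+//	})
+//	if err == nil {
+//		fmt.Println(cluster.ID)
+//	}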
+func (c *Client) CreateCluster(
+	ctx context.Context, apiKey string, clusterRequestPayload *PostClustersRequestPayload,
+) (*ClusterApiResource, error) {
+	result := &ClusterApiResource{}
+
+	clusterbyte, err := json.Marshal(clusterRequestPayload)
+	if err != nil {
+		return result, err
+	}
+
+	response, err := c.doWithRetry(ctx, "POST", "/clusters", nil, clusterbyte, http.Header{
+		"Accept":        []string{"application/json"},
+		"Authorization": []string{"Bearer " + apiKey},
+	})
+
+	if err == nil {
+		defer response.Body.Close()
+		body, _ := io.ReadAll(response.Body)
+
+		switch {
+		// 2xx, Successful
+		case response.StatusCode >= 200 && response.StatusCode < 300:
+			if err = json.Unmarshal(body, &result); err != nil {
+				err = fmt.Errorf("%w: %s", err, body)
+				return result, err
+			}
+			if err = json.Unmarshal(body, &result.ResponsePayload); err != nil {
+				err = fmt.Errorf("%w: %s", err, body)
+			}
+
+		default:
+			//nolint:goerr113 // This is intentionally dynamic.
+			err = fmt.Errorf("%v: %s", response.Status, body)
+		}
+	}
+
+	return result, err
+}
+
+// DeleteCluster calls the delete endpoint, returning
+//
+//	the cluster,
+//	whether the cluster is deleted already,
+//	and an error.
+func (c *Client) DeleteCluster(ctx context.Context, apiKey, id string) (*ClusterApiResource, bool, error) {
+	result := &ClusterApiResource{}
+	var deletedAlready bool
+
+	response, err := c.doWithRetry(ctx, "DELETE", "/clusters/"+id, nil, nil, http.Header{
+		"Accept":        []string{"application/json"},
+		"Authorization": []string{"Bearer " + apiKey},
+	})
+
+	if err == nil {
+		defer response.Body.Close()
+		body, _ := io.ReadAll(response.Body)
+
+		switch {
+		// 2xx, Successful
+		case response.StatusCode >= 200 && response.StatusCode < 300:
+			if err = json.Unmarshal(body, &result); err != nil {
+				err = fmt.Errorf("%w: %s", err, body)
+			}
+
+		// Already deleted
+		// Bridge API returns 410 Gone for previously deleted clusters
+		// - https://docs.crunchybridge.com/api-concepts/idempotency#delete-semantics
+		// But also, if we can't find it...
+		// Maybe if no ID we return already deleted?
+		case response.StatusCode == 410:
+			fallthrough
+		case response.StatusCode == 404:
+			deletedAlready = true
+			err = nil
+
+		default:
+			//nolint:goerr113 // This is intentionally dynamic.
+			err = fmt.Errorf("%v: %s", response.Status, body)
+		}
+	}
+
+	return result, deletedAlready, err
+}
+
+// GetCluster makes a GET request to the "/clusters/<id>" endpoint, thereby retrieving details
+// for a given cluster in Bridge specified by the provided cluster id.
+func (c *Client) GetCluster(ctx context.Context, apiKey, id string) (*ClusterApiResource, error) {
+	result := &ClusterApiResource{}
+
+	response, err := c.doWithRetry(ctx, "GET", "/clusters/"+id, nil, nil, http.Header{
+		"Accept":        []string{"application/json"},
+		"Authorization": []string{"Bearer " + apiKey},
+	})
+
+	if err == nil {
+		defer response.Body.Close()
+		body, _ := io.ReadAll(response.Body)
+
+		switch {
+		// 2xx, Successful
+		case response.StatusCode >= 200 && response.StatusCode < 300:
+			if err = json.Unmarshal(body, &result); err != nil {
+				err = fmt.Errorf("%w: %s", err, body)
+				return result, err
+			}
+			if err = json.Unmarshal(body, &result.ResponsePayload); err != nil {
+				err = fmt.Errorf("%w: %s", err, body)
+			}
+
+		default:
+			//nolint:goerr113 // This is intentionally dynamic.
+ err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// GetClusterStatus makes a GET request to the "/clusters//status" endpoint, thereby retrieving details +// for a given cluster's status in Bridge, specified by the provided cluster id. +func (c *Client) GetClusterStatus(ctx context.Context, apiKey, id string) (*ClusterStatusApiResource, error) { + result := &ClusterStatusApiResource{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+id+"/status", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// GetClusterUpgrade makes a GET request to the "/clusters//upgrade" endpoint, thereby retrieving details +// for a given cluster's upgrade status in Bridge, specified by the provided cluster id. +func (c *Client) GetClusterUpgrade(ctx context.Context, apiKey, id string) (*ClusterUpgradeApiResource, error) { + result := &ClusterUpgradeApiResource{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+id+"/upgrade", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// UpgradeCluster makes a POST request to the "/clusters//upgrade" endpoint, thereby attempting +// to upgrade certain settings for a given cluster in Bridge. +func (c *Client) UpgradeCluster( + ctx context.Context, apiKey, id string, clusterRequestPayload *PostClustersUpgradeRequestPayload, +) (*ClusterUpgradeApiResource, error) { + result := &ClusterUpgradeApiResource{} + + clusterbyte, err := json.Marshal(clusterRequestPayload) + if err != nil { + return result, err + } + + response, err := c.doWithRetry(ctx, "POST", "/clusters/"+id+"/upgrade", nil, clusterbyte, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. 
+ err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// UpgradeClusterHA makes a PUT request to the "/clusters//actions/" endpoint, +// where is either "enable-ha" or "disable-ha", thereby attempting to change the +// HA setting for a given cluster in Bridge. +func (c *Client) UpgradeClusterHA(ctx context.Context, apiKey, id, action string) (*ClusterUpgradeApiResource, error) { + result := &ClusterUpgradeApiResource{} + + response, err := c.doWithRetry(ctx, "PUT", "/clusters/"+id+"/actions/"+action, nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// UpdateCluster makes a PATCH request to the "/clusters/" endpoint, thereby attempting to +// update certain settings for a given cluster in Bridge. +func (c *Client) UpdateCluster( + ctx context.Context, apiKey, id string, clusterRequestPayload *PatchClustersRequestPayload, +) (*ClusterApiResource, error) { + result := &ClusterApiResource{} + + clusterbyte, err := json.Marshal(clusterRequestPayload) + if err != nil { + return result, err + } + + response, err := c.doWithRetry(ctx, "PATCH", "/clusters/"+id, nil, clusterbyte, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// GetClusterRole sends a GET request to the "/clusters//roles/" endpoint, thereby retrieving +// Role information for a specific role from a specific cluster in Bridge. +func (c *Client) GetClusterRole(ctx context.Context, apiKey, clusterId, roleName string) (*ClusterRoleApiResource, error) { + result := &ClusterRoleApiResource{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+clusterId+"/roles/"+roleName, nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. 
+ err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// ListClusterRoles sends a GET request to the "/clusters//roles" endpoint thereby retrieving +// a list of all cluster roles for a specific cluster in Bridge. +func (c *Client) ListClusterRoles(ctx context.Context, apiKey, id string) ([]*ClusterRoleApiResource, error) { + result := ClusterRoleList{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+id+"/roles", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:goerr113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result.Roles, err +} diff --git a/internal/bridge/client_test.go b/internal/bridge/client_test.go index 729cf521d3..28728c701c 100644 --- a/internal/bridge/client_test.go +++ b/internal/bridge/client_test.go @@ -1,33 +1,30 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package bridge import ( "context" + "encoding/json" "io" "net/http" "net/http/httptest" + "net/url" "testing" "time" gocmp "github.com/google/go-cmp/cmp" gocmpopts "github.com/google/go-cmp/cmp/cmpopts" "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/crunchydata/postgres-operator/internal/initialize" ) +var testApiKey = "9012" +var testTeamId = "5678" + // TestClientBackoff logs the backoff timing chosen by [NewClient] for use // with `go test -v`. 
func TestClientBackoff(t *testing.T) { @@ -75,8 +72,10 @@ func TestClientDoWithBackoff(t *testing.T) { assert.Equal(t, client.BaseURL.String(), server.URL) ctx := context.Background() + params := url.Values{} + params.Add("foo", "bar") response, err := client.doWithBackoff(ctx, - "ANY", "/some/path", []byte(`the-body`), + "ANY", "/some/path", params, []byte(`the-body`), http.Header{"Some": []string{"header"}}) assert.NilError(t, err) @@ -87,7 +86,7 @@ func TestClientDoWithBackoff(t *testing.T) { assert.Equal(t, len(requests), 1) assert.Equal(t, bodies[0], "the-body") assert.Equal(t, requests[0].Method, "ANY") - assert.Equal(t, requests[0].URL.String(), "/some/path") + assert.Equal(t, requests[0].URL.String(), "/some/path?foo=bar") assert.DeepEqual(t, requests[0].Header.Values("Some"), []string{"header"}) assert.DeepEqual(t, requests[0].Header.Values("User-Agent"), []string{"PGO/xyz"}) @@ -120,7 +119,7 @@ func TestClientDoWithBackoff(t *testing.T) { ctx := context.Background() response, err := client.doWithBackoff(ctx, - "POST", "/anything", []byte(`any-body`), + "POST", "/anything", nil, []byte(`any-body`), http.Header{"Any": []string{"thing"}}) assert.NilError(t, err) @@ -147,7 +146,7 @@ func TestClientDoWithBackoff(t *testing.T) { // Another, identical request gets a new Idempotency-Key. response, err = client.doWithBackoff(ctx, - "POST", "/anything", []byte(`any-body`), + "POST", "/anything", nil, []byte(`any-body`), http.Header{"Any": []string{"thing"}}) assert.NilError(t, err) @@ -176,7 +175,7 @@ func TestClientDoWithBackoff(t *testing.T) { assert.Equal(t, client.BaseURL.String(), server.URL) ctx := context.Background() - _, err := client.doWithBackoff(ctx, "POST", "/any", nil, nil) //nolint:bodyclose + _, err := client.doWithBackoff(ctx, "POST", "/any", nil, nil, nil) //nolint:bodyclose assert.ErrorContains(t, err, "timed out waiting") assert.Assert(t, requests > 0, "expected multiple requests") }) @@ -198,7 +197,7 @@ func TestClientDoWithBackoff(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) t.Cleanup(cancel) - _, err := client.doWithBackoff(ctx, "POST", "/any", nil, nil) //nolint:bodyclose + _, err := client.doWithBackoff(ctx, "POST", "/any", nil, nil, nil) //nolint:bodyclose assert.ErrorIs(t, err, context.DeadlineExceeded) assert.Assert(t, requests > 0, "expected multiple requests") }) @@ -222,8 +221,10 @@ func TestClientDoWithRetry(t *testing.T) { assert.Equal(t, client.BaseURL.String(), server.URL) ctx := context.Background() + params := url.Values{} + params.Add("foo", "bar") response, err := client.doWithRetry(ctx, - "ANY", "/some/path", []byte(`the-body`), + "ANY", "/some/path", params, []byte(`the-body`), http.Header{"Some": []string{"header"}}) assert.NilError(t, err) @@ -234,7 +235,7 @@ func TestClientDoWithRetry(t *testing.T) { assert.Equal(t, len(requests), 1) assert.Equal(t, bodies[0], "the-body") assert.Equal(t, requests[0].Method, "ANY") - assert.Equal(t, requests[0].URL.String(), "/some/path") + assert.Equal(t, requests[0].URL.String(), "/some/path?foo=bar") assert.DeepEqual(t, requests[0].Header.Values("Some"), []string{"header"}) assert.DeepEqual(t, requests[0].Header.Values("User-Agent"), []string{"PGO/xyz"}) @@ -267,7 +268,7 @@ func TestClientDoWithRetry(t *testing.T) { ctx := context.Background() response, err := client.doWithRetry(ctx, - "POST", "/anything", []byte(`any-body`), + "POST", "/anything", nil, []byte(`any-body`), http.Header{"Any": []string{"thing"}}) assert.NilError(t, err) @@ -299,7 +300,7 @@ func 
TestClientDoWithRetry(t *testing.T) {
 		assert.Assert(t, requests[1].Header.Get("Idempotency-Key") != prior,
 			"expected a new idempotency key")
 
-		// Requests are delayed according the the server's response.
+		// Requests are delayed according to the server's response.
 		// TODO: Mock the clock for faster tests.
 		assert.Assert(t, times[0].Add(time.Second).Before(times[1]),
 			"expected the second request over 1sec after the first")
@@ -321,7 +322,7 @@ func TestClientDoWithRetry(t *testing.T) {
 		t.Cleanup(cancel)
 
 		start := time.Now()
-		_, err := client.doWithRetry(ctx, "POST", "/any", nil, nil) //nolint:bodyclose
+		_, err := client.doWithRetry(ctx, "POST", "/any", nil, nil, nil) //nolint:bodyclose
 		assert.ErrorIs(t, err, context.DeadlineExceeded)
 		assert.Assert(t, time.Since(start) < time.Second)
 		assert.Equal(t, requests, 1, "expected one request")
@@ -392,7 +393,7 @@ func TestClientDoWithRetry(t *testing.T) {
 		assert.Equal(t, client.BaseURL.String(), server.URL)
 
 		ctx := context.Background()
-		response, err := client.doWithRetry(ctx, "POST", "/any", nil, nil)
+		response, err := client.doWithRetry(ctx, "POST", "/any", nil, nil, nil)
 		assert.NilError(t, err)
 		assert.Assert(t, response != nil)
 		t.Cleanup(func() { _ = response.Body.Close() })
@@ -531,3 +532,824 @@ func TestClientCreateInstallation(t *testing.T) {
 		assert.ErrorContains(t, err, "asdf")
 	})
 }
+
+func TestListClusters(t *testing.T) {
+	responsePayload := &ClusterList{
+		Clusters: []*ClusterApiResource{},
+	}
+	firstClusterApiResource := &ClusterApiResource{
+		ID: "1234",
+	}
+	secondClusterApiResource := &ClusterApiResource{
+		ID: "2345",
+	}
+
+	t.Run("WeSendCorrectData", func(t *testing.T) {
+		responsePayloadJson, err := json.Marshal(responsePayload)
+		assert.NilError(t, err)
+
+		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			assert.Equal(t, r.Method, "GET", "Expected GET method")
+			assert.Equal(t, r.URL.Path, "/clusters", "Expected path to be '/clusters'")
+			assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.")
+			assert.Equal(t, r.URL.Query()["team_id"][0], testTeamId, "Expected query params to contain team id.")
+
+			w.WriteHeader(http.StatusOK)
+			_, _ = w.Write(responsePayloadJson)
+		}))
+		t.Cleanup(server.Close)
+
+		client := NewClient(server.URL, "")
+		assert.Equal(t, client.BaseURL.String(), server.URL)
+
+		_, err = client.ListClusters(context.Background(), testApiKey, testTeamId)
+		assert.NilError(t, err)
+	})
+
+	t.Run("OkResponseNoClusters", func(t *testing.T) {
+		responsePayloadJson, err := json.Marshal(responsePayload)
+		assert.NilError(t, err)
+
+		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusOK)
+			_, _ = w.Write(responsePayloadJson)
+		}))
+		t.Cleanup(server.Close)
+
+		client := NewClient(server.URL, "")
+		assert.Equal(t, client.BaseURL.String(), server.URL)
+
+		clusters, err := client.ListClusters(context.Background(), testApiKey, testTeamId)
+		assert.NilError(t, err)
+		assert.Equal(t, len(clusters), 0)
+	})
+
+	t.Run("OkResponseOneCluster", func(t *testing.T) {
+		responsePayload.Clusters = append(responsePayload.Clusters, firstClusterApiResource)
+		responsePayloadJson, err := json.Marshal(responsePayload)
+		assert.NilError(t, err)
+
+		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusOK)
+			_, _ = w.Write(responsePayloadJson)
+		}))
+		t.Cleanup(server.Close)
+
+		client :=
NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusters, err := client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.NilError(t, err) + assert.Equal(t, len(clusters), 1) + assert.Equal(t, clusters[0].ID, responsePayload.Clusters[0].ID) + }) + + t.Run("OkResponseTwoClusters", func(t *testing.T) { + responsePayload.Clusters = append(responsePayload.Clusters, secondClusterApiResource) + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusters, err := client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.NilError(t, err) + assert.Equal(t, len(clusters), 2) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestCreateCluster(t *testing.T) { + clusterApiResource := &ClusterApiResource{ + ClusterName: "test-cluster1", + } + clusterRequestPayload := &PostClustersRequestPayload{ + Name: "test-cluster1", + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var receivedPayload PostClustersRequestPayload + dec := json.NewDecoder(r.Body) + err = dec.Decode(&receivedPayload) + assert.NilError(t, err) + assert.Equal(t, r.Method, "POST", "Expected POST method") + assert.Equal(t, r.URL.Path, "/clusters", "Expected path to be '/clusters'") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + assert.Equal(t, receivedPayload, *clusterRequestPayload) + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.CreateCluster(context.Background(), testApiKey, clusterRequestPayload) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + newCluster, err := client.CreateCluster(context.Background(), testApiKey, clusterRequestPayload) + assert.NilError(t, err) + assert.Equal(t, newCluster.ClusterName, clusterApiResource.ClusterName) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := 
json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.CreateCluster(context.Background(), testApiKey, clusterRequestPayload) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestDeleteCluster(t *testing.T) { + clusterId := "1234" + clusterApiResource := &ClusterApiResource{ + ClusterName: "test-cluster1", + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "DELETE", "Expected DELETE method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId, "Expected path to be /clusters/"+clusterId) + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, _, err = client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + deletedCluster, deletedAlready, err := client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, deletedCluster.ClusterName, clusterApiResource.ClusterName) + assert.Equal(t, deletedAlready, false) + }) + + t.Run("GoneResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusGone) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, deletedAlready, err := client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, deletedAlready, true) + }) + + t.Run("NotFoundResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, deletedAlready, err := client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, deletedAlready, true) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + 
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, _, err = client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestGetCluster(t *testing.T) { + clusterId := "1234" + clusterApiResource := &ClusterApiResource{ + ClusterName: "test-cluster1", + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "GET", "Expected GET method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId, "Expected path to be /clusters/"+clusterId) + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + cluster, err := client.GetCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, cluster.ClusterName, clusterApiResource.ClusterName) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetCluster(context.Background(), testApiKey, clusterId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestGetClusterStatus(t *testing.T) { + clusterId := "1234" + state := "Ready" + + clusterStatusApiResource := &ClusterStatusApiResource{ + State: state, + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterStatusApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "GET", "Expected GET method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/status", "Expected path to be /clusters/"+clusterId+"/status") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + 
+ _, err = client.GetClusterStatus(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterStatusApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterStatus, err := client.GetClusterStatus(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, clusterStatus.State, state) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterStatusApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetClusterStatus(context.Background(), testApiKey, clusterId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestGetClusterUpgrade(t *testing.T) { + clusterId := "1234" + clusterUpgradeApiResource := &ClusterUpgradeApiResource{ + ClusterID: clusterId, + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "GET", "Expected GET method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/upgrade", "Expected path to be /clusters/"+clusterId+"/upgrade") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetClusterUpgrade(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterUpgrade, err := client.GetClusterUpgrade(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, clusterUpgrade.ClusterID, clusterId) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetClusterUpgrade(context.Background(), testApiKey, clusterId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 
Bad Request") + }) +} + +func TestUpgradeCluster(t *testing.T) { + clusterId := "1234" + clusterUpgradeApiResource := &ClusterUpgradeApiResource{ + ClusterID: clusterId, + } + clusterUpgradeRequestPayload := &PostClustersUpgradeRequestPayload{ + Plan: "standard-8", + PostgresVersion: intstr.FromInt(15), + UpgradeStartTime: "start-time", + Storage: 10, + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var receivedPayload PostClustersUpgradeRequestPayload + dec := json.NewDecoder(r.Body) + err = dec.Decode(&receivedPayload) + assert.NilError(t, err) + assert.Equal(t, r.Method, "POST", "Expected POST method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/upgrade", "Expected path to be /clusters/"+clusterId+"/upgrade") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + assert.Equal(t, receivedPayload, *clusterUpgradeRequestPayload) + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.UpgradeCluster(context.Background(), testApiKey, clusterId, clusterUpgradeRequestPayload) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterUpgrade, err := client.UpgradeCluster(context.Background(), testApiKey, clusterId, clusterUpgradeRequestPayload) + assert.NilError(t, err) + assert.Equal(t, clusterUpgrade.ClusterID, clusterId) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.UpgradeCluster(context.Background(), testApiKey, clusterId, clusterUpgradeRequestPayload) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestUpgradeClusterHA(t *testing.T) { + clusterId := "1234" + action := "enable-ha" + clusterUpgradeApiResource := &ClusterUpgradeApiResource{ + ClusterID: clusterId, + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "PUT", "Expected PUT method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/actions/"+action, + "Expected path to be /clusters/"+clusterId+"/actions/"+action) + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = 
w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.UpgradeClusterHA(context.Background(), testApiKey, clusterId, action) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterUpgrade, err := client.UpgradeClusterHA(context.Background(), testApiKey, clusterId, action) + assert.NilError(t, err) + assert.Equal(t, clusterUpgrade.ClusterID, clusterId) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.UpgradeClusterHA(context.Background(), testApiKey, clusterId, action) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestUpdateCluster(t *testing.T) { + clusterId := "1234" + clusterApiResource := &ClusterApiResource{ + ClusterName: "new-cluster-name", + } + clusterUpdateRequestPayload := &PatchClustersRequestPayload{ + IsProtected: initialize.Bool(true), + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var receivedPayload PatchClustersRequestPayload + dec := json.NewDecoder(r.Body) + err = dec.Decode(&receivedPayload) + assert.NilError(t, err) + assert.Equal(t, r.Method, "PATCH", "Expected PATCH method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId, "Expected path to be /clusters/"+clusterId) + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + assert.Equal(t, *receivedPayload.IsProtected, *clusterUpdateRequestPayload.IsProtected) + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.UpdateCluster(context.Background(), testApiKey, clusterId, clusterUpdateRequestPayload) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterUpdate, err := client.UpdateCluster(context.Background(), testApiKey, clusterId, clusterUpdateRequestPayload) + assert.NilError(t, err) + assert.Equal(t, clusterUpdate.ClusterName, clusterApiResource.ClusterName) + }) + + t.Run("ErrorResponse", 
func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.UpdateCluster(context.Background(), testApiKey, clusterId, clusterUpdateRequestPayload) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestGetClusterRole(t *testing.T) { + clusterId := "1234" + roleName := "application" + clusterRoleApiResource := &ClusterRoleApiResource{ + Name: roleName, + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterRoleApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "GET", "Expected GET method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/roles/"+roleName, + "Expected path to be /clusters/"+clusterId+"/roles/"+roleName) + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetClusterRole(context.Background(), testApiKey, clusterId, roleName) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterRoleApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterRole, err := client.GetClusterRole(context.Background(), testApiKey, clusterId, roleName) + assert.NilError(t, err) + assert.Equal(t, clusterRole.Name, roleName) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterRoleApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.GetClusterRole(context.Background(), testApiKey, clusterId, roleName) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestListClusterRoles(t *testing.T) { + clusterId := "1234" + responsePayload := &ClusterRoleList{ + Roles: []*ClusterRoleApiResource{}, + } + applicationClusterRoleApiResource := &ClusterRoleApiResource{} + postgresClusterRoleApiResource := &ClusterRoleApiResource{} + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "GET", "Expected GET method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/roles", "Expected path to be 
'/clusters/%s/roles'") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.ListClusterRoles(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayload.Roles = append(responsePayload.Roles, applicationClusterRoleApiResource, postgresClusterRoleApiResource) + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusterRoles, err := client.ListClusterRoles(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, len(clusterRoles), 2) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.ListClusterRoles(context.Background(), testApiKey, clusterId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} diff --git a/internal/bridge/crunchybridgecluster/apply.go b/internal/bridge/crunchybridgecluster/apply.go new file mode 100644 index 0000000000..d77d719d6a --- /dev/null +++ b/internal/bridge/crunchybridgecluster/apply.go @@ -0,0 +1,47 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "reflect" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// patch sends patch to object's endpoint in the Kubernetes API and updates +// object with any returned content. The fieldManager is set to r.Owner, but +// can be overridden in options. +// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers +// +// NOTE: This function is duplicated from a version in the postgrescluster package +func (r *CrunchyBridgeClusterReconciler) patch( + ctx context.Context, object client.Object, + patch client.Patch, options ...client.PatchOption, +) error { + options = append([]client.PatchOption{r.Owner}, options...) + return r.Client.Patch(ctx, object, patch, options...) +} + +// apply sends an apply patch to object's endpoint in the Kubernetes API and +// updates object with any returned content. The fieldManager is set to +// r.Owner and the force parameter is true. +// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers +// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts +// +// NOTE: This function is duplicated from a version in the postgrescluster package +func (r *CrunchyBridgeClusterReconciler) apply(ctx context.Context, object client.Object) error { + // Generate an apply-patch by comparing the object to its zero value. 
+ zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() + data, err := client.MergeFrom(zero.(client.Object)).Data(object) + apply := client.RawPatch(client.Apply.Type(), data) + + // Send the apply-patch with force=true. + if err == nil { + err = r.patch(ctx, object, apply, client.ForceOwnership) + } + + return err +} diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go new file mode 100644 index 0000000000..03d67442be --- /dev/null +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go @@ -0,0 +1,701 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + pgoRuntime "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// CrunchyBridgeClusterReconciler reconciles a CrunchyBridgeCluster object +type CrunchyBridgeClusterReconciler struct { + client.Client + + Owner client.FieldOwner + + // For this iteration, we will only be setting conditions rather than + // setting conditions and emitting events. That may change in the future, + // so we're leaving this EventRecorder here for now. + // record.EventRecorder + + // NewClient is called each time a new Client is needed. + NewClient func() bridge.ClientInterface +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={list,watch} +//+kubebuilder:rbac:groups="",resources="secrets",verbs={list,watch} + +// SetupWithManager sets up the controller with the Manager. +func (r *CrunchyBridgeClusterReconciler) SetupWithManager( + mgr ctrl.Manager, +) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1beta1.CrunchyBridgeCluster{}). + Owns(&corev1.Secret{}). + // Wake periodically to check Bridge API for all CrunchyBridgeClusters. + // TODO: Potentially replace this with per-cluster requeue times and remove the Watch function. + // A smarter approach would retry after a certain time for each cluster: https://gist.github.com/cbandy/a5a604e3026630c5b08cfbcdfffd2a13 + WatchesRawSource( + pgoRuntime.NewTickerImmediate(5*time.Minute, event.GenericEvent{}, r.Watch()), + ). + // Watch secrets and filter for secrets mentioned by CrunchyBridgeClusters + Watches( + &corev1.Secret{}, + r.watchForRelatedSecret(), + ). + Complete(r) +} + +// The owner reference created by controllerutil.SetControllerReference blocks +// deletion. The OwnerReferencesPermissionEnforcement plugin requires that the +// creator of such a reference have either "delete" permission on the owner or +// "update" permission on the owner's "finalizers" subresource. 
+// - https://docs.k8s.io/reference/access-authn-authz/admission-controllers/ +// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/finalizers",verbs={update} + +// setControllerReference sets owner as a Controller OwnerReference on controlled. +// Only one OwnerReference can be a controller, so it returns an error if another +// is already set. +func (r *CrunchyBridgeClusterReconciler) setControllerReference( + owner *v1beta1.CrunchyBridgeCluster, controlled client.Object, +) error { + return controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme()) +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={get,patch,update} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/status",verbs={patch,update} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/finalizers",verbs={patch,update} +//+kubebuilder:rbac:groups="",resources="secrets",verbs={get} + +// Reconcile does the work to move the current state of the world toward the +// desired state described in a [v1beta1.CrunchyBridgeCluster] identified by req. +func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + + // Retrieve the crunchybridgecluster from the client cache, if it exists. A deferred + // function below will send any changes to its Status field. + // + // NOTE: No DeepCopy is necessary here because controller-runtime makes a + // copy before returning from its cache. + // - https://github.com/kubernetes-sigs/controller-runtime/issues/1235 + crunchybridgecluster := &v1beta1.CrunchyBridgeCluster{} + err := r.Get(ctx, req.NamespacedName, crunchybridgecluster) + + if err == nil { + // Write any changes to the crunchybridgecluster status on the way out. + before := crunchybridgecluster.DeepCopy() + defer func() { + if !equality.Semantic.DeepEqual(before.Status, crunchybridgecluster.Status) { + status := r.Status().Patch(ctx, crunchybridgecluster, client.MergeFrom(before), r.Owner) + + if err == nil && status != nil { + err = status + } else if status != nil { + log.Error(status, "Patching CrunchyBridgeCluster status") + } + } + }() + } else { + // NotFound cannot be fixed by requeuing so ignore it. During background + // deletion, we receive delete events from crunchybridgecluster's dependents after + // crunchybridgecluster is deleted. + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Get and validate connection secret for requests + key, team, err := r.reconcileBridgeConnectionSecret(ctx, crunchybridgecluster) + if err != nil { + log.Error(err, "issue reconciling bridge connection secret") + + // Don't automatically requeue Secret issues. We are watching for + // related secrets, so will requeue when a related secret is touched. + // lint:ignore nilerr Return err as status, no requeue needed + return ctrl.Result{}, nil + } + + // Check for and handle deletion of cluster. Return early if it is being + // deleted or there was an error. Make sure finalizer is added if cluster + // is not being deleted. 
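+ // handleDelete returns a nil result when the cluster is not being deleted
+ // and reconciliation should continue; any non-nil result is returned as-is
+ // so deletion can finish before anything else happens.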
+ if result, err := r.handleDelete(ctx, crunchybridgecluster, key); err != nil { + log.Error(err, "deleting") + return ctrl.Result{}, err + } else if result != nil { + if log := log.V(1); log.Enabled() { + log.Info("deleting", "result", fmt.Sprintf("%+v", *result)) + } + return *result, err + } + + // TODO(crunchybridgecluster): Consider a cleaner way to add, check, and remove + // status conditions, similar to the approach in the upgrade controller. + // Exit early if we can't create from this K8s object + // unless this K8s object has been changed (compare ObservedGeneration) + invalid := meta.FindStatusCondition(crunchybridgecluster.Status.Conditions, + v1beta1.ConditionReady) + if invalid != nil && + invalid.Status == metav1.ConditionFalse && + invalid.Reason == "ClusterInvalid" && + invalid.ObservedGeneration == crunchybridgecluster.GetGeneration() { + return ctrl.Result{}, nil + } + + // Check for an upgrade error and return until observedGeneration has + // been incremented. + invalidUpgrade := meta.FindStatusCondition(crunchybridgecluster.Status.Conditions, + v1beta1.ConditionUpgrading) + if invalidUpgrade != nil && + invalidUpgrade.Status == metav1.ConditionFalse && + invalidUpgrade.Reason == "UpgradeError" && + invalidUpgrade.ObservedGeneration == crunchybridgecluster.GetGeneration() { + return ctrl.Result{}, nil + } + + // We should only be missing the ID if no create has been issued + // or the create was interrupted and we haven't received the ID. + if crunchybridgecluster.Status.ID == "" { + // Check if a cluster with the same name already exists + controllerResult, err := r.handleDuplicateClusterName(ctx, key, team, crunchybridgecluster) + if err != nil || controllerResult != nil { + return *controllerResult, err + } + + // No cluster exists with that name and the ID is missing, so create the cluster. + return r.handleCreateCluster(ctx, key, team, crunchybridgecluster), nil + } + + // If we reach this point, our CrunchyBridgeCluster object has an ID, so we want + // to fill in the details for the cluster, cluster status, and cluster upgrades + // from the Bridge API. + + // Get Cluster + err = r.handleGetCluster(ctx, key, crunchybridgecluster) + if err != nil { + return ctrl.Result{}, err + } + + // Get Cluster Status + err = r.handleGetClusterStatus(ctx, key, crunchybridgecluster) + if err != nil { + return ctrl.Result{}, err + } + + // Get Cluster Upgrade + err = r.handleGetClusterUpgrade(ctx, key, crunchybridgecluster) + if err != nil { + return ctrl.Result{}, err + } + + // Reconcile roles and their secrets + err = r.reconcilePostgresRoles(ctx, key, crunchybridgecluster) + if err != nil { + log.Error(err, "issue reconciling postgres user roles/secrets") + return ctrl.Result{}, err + } + + // For now, we skip updating until the upgrade status is cleared. + // In the future, we may want to update in-progress upgrades, + // and for that we will need a way to tell that an upgrade in progress + // is the one we want to update. + // Consider: Perhaps add `generation` field to upgrade status? + // Checking this here also means that if an upgrade is requested through the GUI/API + // then we will requeue and wait for it to be done. + // TODO(crunchybridgecluster): Do we want the operator to interrupt + // upgrades created through the GUI/API? 
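+ // While an upgrade is in flight, wake again in three minutes rather than
+ // requeueing with exponential backoff.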
+ if len(crunchybridgecluster.Status.OngoingUpgrade) != 0 { + return runtime.RequeueWithoutBackoff(3 * time.Minute), nil + } + + // Check if there's an upgrade difference for the three upgradeable fields that hit the upgrade endpoint. + // Why PostgresVersion and MajorVersion? Because MajorVersion in the Status is sure to be + // an int of the major version, whereas Status.Responses.Cluster.PostgresVersion might be the ID + if (crunchybridgecluster.Spec.Storage != *crunchybridgecluster.Status.Storage) || + crunchybridgecluster.Spec.Plan != crunchybridgecluster.Status.Plan || + crunchybridgecluster.Spec.PostgresVersion != crunchybridgecluster.Status.MajorVersion { + return r.handleUpgrade(ctx, key, crunchybridgecluster), nil + } + + // Are there diffs between the cluster response from the Bridge API and the spec? + // HA diffs are sent to /clusters/{cluster_id}/actions/[enable|disable]-ha, + // so we have to know (a) whether to send a request and (b) which endpoint to send it to. + if crunchybridgecluster.Spec.IsHA != *crunchybridgecluster.Status.IsHA { + return r.handleUpgradeHA(ctx, key, crunchybridgecluster), nil + } + + // Check if there's a difference in is_protected, name, maintenance_window_start, etc. + // see https://docs.crunchybridge.com/api/cluster#update-cluster + // Updates to these fields hit the PATCH `clusters/{cluster_id}` endpoint. + if crunchybridgecluster.Spec.IsProtected != *crunchybridgecluster.Status.IsProtected || + crunchybridgecluster.Spec.ClusterName != crunchybridgecluster.Status.ClusterName { + return r.handleUpdate(ctx, key, crunchybridgecluster), nil + } + + log.Info("Reconciled") + // TODO(crunchybridgecluster): do we always want to requeue? Does the Watch mean we + // don't need this, or do we want both? + return runtime.RequeueWithoutBackoff(3 * time.Minute), nil +} + +// reconcileBridgeConnectionSecret looks for the Bridge connection secret specified by the cluster, +// and returns the API key and Team ID found in the secret, or sets conditions and returns an error +// if the secret is invalid. +func (r *CrunchyBridgeClusterReconciler) reconcileBridgeConnectionSecret( + ctx context.Context, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) (string, string, error) { + key, team, err := r.GetSecretKeys(ctx, crunchybridgecluster) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "SecretInvalid", + Message: fmt.Sprintf( + "The condition of the cluster is unknown because the secret is invalid: %v", err), + }) + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionUnknown, + ObservedGeneration: crunchybridgecluster.GetGeneration(), + LastTransitionTime: metav1.Time{}, + Reason: "SecretInvalid", + Message: fmt.Sprintf( + "The condition of the upgrade(s) is unknown because the secret is invalid: %v", err), + }) + + return "", "", err + } + + return key, team, err +} + +// handleDuplicateClusterName checks Bridge for any already existing clusters that +// have the same name. It returns (nil, nil) when no cluster is found with the same +// name. It returns a controller result, indicating we should exit the reconcile loop, +// if a cluster with a duplicate name is found. The caller is responsible for +// returning controller result objects and errors to controller-runtime. 
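+// For example, to adopt an existing Bridge cluster, a user would create the CR
+// with a matching name and add the
+// "postgres-operator.crunchydata.com/adopt-bridge-cluster" annotation set to the
+// existing cluster's ID; the handler below then assigns that ID to the status.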
+func (r *CrunchyBridgeClusterReconciler) handleDuplicateClusterName(ctx context.Context, + apiKey, teamId string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) (*ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + + clusters, err := r.NewClient().ListClusters(ctx, apiKey, teamId) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "UnknownClusterState", + Message: fmt.Sprintf("Issue listing existing clusters in Bridge: %v", err), + }) + log.Error(err, "issue listing existing clusters in Bridge") + return &ctrl.Result{}, err + } + + for _, cluster := range clusters { + if crunchybridgecluster.Spec.ClusterName == cluster.ClusterName { + // Cluster with the same name exists so check for adoption annotation + adoptionID, annotationExists := crunchybridgecluster.Annotations[naming.CrunchyBridgeClusterAdoptionAnnotation] + if annotationExists && strings.EqualFold(adoptionID, cluster.ID) { + // Annotation is present with correct ID value; adopt cluster by assigning ID to status. + crunchybridgecluster.Status.ID = cluster.ID + // Requeue now that we have a cluster ID assigned + return &ctrl.Result{Requeue: true}, nil + } + + // If we made it here, the adoption annotation either doesn't exist or its value is incorrect. + // The user must either add it or change the name on the CR. + + // Set invalid status condition and create log message. + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionFalse, + Reason: "DuplicateClusterName", + Message: fmt.Sprintf("A cluster with the same name already exists for this team (Team ID: %v). "+ + "Give the CrunchyBridgeCluster CR a unique name, or if you would like to take control of the "+ + "existing cluster, add the 'postgres-operator.crunchydata.com/adopt-bridge-cluster' "+ + "annotation and set its value to the existing cluster's ID (Cluster ID: %v).", teamId, cluster.ID), + }) + + log.Info(fmt.Sprintf("A cluster with the same name already exists for this team (Team ID: %v). 
"+ + "Give the CrunchyBridgeCluster CR a unique name, or if you would like to take control "+ + "of the existing cluster, add the 'postgres-operator.crunchydata.com/adopt-bridge-cluster' "+ + "annotation and set its value to the existing cluster's ID (Cluster ID: %v).", teamId, cluster.ID)) + + // We have an invalid cluster spec so we don't want to requeue + return &ctrl.Result{}, nil + } + } + + return nil, nil +} + +// handleCreateCluster handles creating new Crunchy Bridge Clusters +func (r *CrunchyBridgeClusterReconciler) handleCreateCluster(ctx context.Context, + apiKey, teamId string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) ctrl.Result { + log := ctrl.LoggerFrom(ctx) + + createClusterRequestPayload := &bridge.PostClustersRequestPayload{ + IsHA: crunchybridgecluster.Spec.IsHA, + Name: crunchybridgecluster.Spec.ClusterName, + Plan: crunchybridgecluster.Spec.Plan, + PostgresVersion: intstr.FromInt(crunchybridgecluster.Spec.PostgresVersion), + Provider: crunchybridgecluster.Spec.Provider, + Region: crunchybridgecluster.Spec.Region, + Storage: bridge.ToGibibytes(crunchybridgecluster.Spec.Storage), + Team: teamId, + } + cluster, err := r.NewClient().CreateCluster(ctx, apiKey, createClusterRequestPayload) + if err != nil { + log.Error(err, "issue creating cluster in Bridge") + // TODO(crunchybridgecluster): probably shouldn't set this condition unless response from Bridge + // indicates the payload is wrong + // Otherwise want a different condition + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionFalse, + Reason: "ClusterInvalid", + Message: fmt.Sprintf( + "Cannot create from spec: %v", err), + }) + + // TODO(crunchybridgecluster): If the payload is wrong, we don't want to requeue, so pass nil error + // If the transmission hit a transient problem, we do want to requeue + return ctrl.Result{} + } + crunchybridgecluster.Status.ID = cluster.ID + + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "UnknownClusterState", + Message: "The condition of the cluster is unknown.", + }) + + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionUnknown, + Reason: "UnknownUpgradeState", + Message: "The condition of the upgrade(s) is unknown.", + }) + + return runtime.RequeueWithoutBackoff(3 * time.Minute) +} + +// handleGetCluster handles getting the cluster details from Bridge and +// updating the cluster CR's Status accordingly +func (r *CrunchyBridgeClusterReconciler) handleGetCluster(ctx context.Context, + apiKey string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) error { + log := ctrl.LoggerFrom(ctx) + + clusterDetails, err := r.NewClient().GetCluster(ctx, apiKey, crunchybridgecluster.Status.ID) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "UnknownClusterState", + Message: fmt.Sprintf("Issue getting cluster information from Bridge: %v", err), + }) + log.Error(err, "issue getting cluster information from Bridge") + 
return err + } + clusterDetails.AddDataToClusterStatus(crunchybridgecluster) + + return nil +} + +// handleGetClusterStatus handles getting the cluster status from Bridge and +// updating the cluster CR's Status accordingly +func (r *CrunchyBridgeClusterReconciler) handleGetClusterStatus(ctx context.Context, + apiKey string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) error { + log := ctrl.LoggerFrom(ctx) + + clusterStatus, err := r.NewClient().GetClusterStatus(ctx, apiKey, crunchybridgecluster.Status.ID) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "UnknownClusterState", + Message: fmt.Sprintf("Issue getting cluster status from Bridge: %v", err), + }) + crunchybridgecluster.Status.State = "unknown" + log.Error(err, "issue getting cluster status from Bridge") + return err + } + clusterStatus.AddDataToClusterStatus(crunchybridgecluster) + + if clusterStatus.State == "ready" { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionTrue, + Reason: clusterStatus.State, + Message: fmt.Sprintf("Bridge cluster state is %v.", clusterStatus.State), + }) + } else { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionFalse, + Reason: clusterStatus.State, + Message: fmt.Sprintf("Bridge cluster state is %v.", clusterStatus.State), + }) + } + + return nil +} + +// handleGetClusterUpgrade handles getting the ongoing upgrade operations from Bridge and +// updating the cluster CR's Status accordingly +func (r *CrunchyBridgeClusterReconciler) handleGetClusterUpgrade(ctx context.Context, + apiKey string, + crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) error { + log := ctrl.LoggerFrom(ctx) + + clusterUpgradeDetails, err := r.NewClient().GetClusterUpgrade(ctx, apiKey, crunchybridgecluster.Status.ID) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionUnknown, + Reason: "UnknownUpgradeState", + Message: fmt.Sprintf("Issue getting cluster upgrade from Bridge: %v", err), + }) + log.Error(err, "issue getting cluster upgrade from Bridge") + return err + } + clusterUpgradeDetails.AddDataToClusterStatus(crunchybridgecluster) + + if len(clusterUpgradeDetails.Operations) != 0 { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionTrue, + Reason: clusterUpgradeDetails.Operations[0].Flavor, + Message: fmt.Sprintf( + "Performing an upgrade of type %v with a state of %v.", + clusterUpgradeDetails.Operations[0].Flavor, clusterUpgradeDetails.Operations[0].State), + }) + } else { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionFalse, + Reason: "NoUpgradesInProgress", + Message: "No upgrades being performed", + }) + } + + return nil +} + +// 
handleUpgrade handles upgrades that hit the "POST /clusters/{cluster_id}/upgrade" endpoint +func (r *CrunchyBridgeClusterReconciler) handleUpgrade(ctx context.Context, + apiKey string, + crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) ctrl.Result { + log := ctrl.LoggerFrom(ctx) + + log.Info("Handling upgrade request") + + upgradeRequest := &bridge.PostClustersUpgradeRequestPayload{ + Plan: crunchybridgecluster.Spec.Plan, + PostgresVersion: intstr.FromInt(crunchybridgecluster.Spec.PostgresVersion), + Storage: bridge.ToGibibytes(crunchybridgecluster.Spec.Storage), + } + + clusterUpgrade, err := r.NewClient().UpgradeCluster(ctx, apiKey, + crunchybridgecluster.Status.ID, upgradeRequest) + if err != nil { + // TODO(crunchybridgecluster): consider what errors we might get + // and what different results/requeue times we want to return. + // Currently: don't requeue and wait for user to change spec. + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionFalse, + Reason: "UpgradeError", + Message: fmt.Sprintf( + "Error performing an upgrade: %s", err), + }) + log.Error(err, "Error while attempting cluster upgrade") + return ctrl.Result{} + } + clusterUpgrade.AddDataToClusterStatus(crunchybridgecluster) + + if len(clusterUpgrade.Operations) != 0 { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionTrue, + Reason: clusterUpgrade.Operations[0].Flavor, + Message: fmt.Sprintf( + "Performing an upgrade of type %v with a state of %v.", + clusterUpgrade.Operations[0].Flavor, clusterUpgrade.Operations[0].State), + }) + } + + return runtime.RequeueWithoutBackoff(3 * time.Minute) +} + +// handleUpgradeHA handles upgrades that hit the +// "PUT /clusters/{cluster_id}/actions/[enable|disable]-ha" endpoint +func (r *CrunchyBridgeClusterReconciler) handleUpgradeHA(ctx context.Context, + apiKey string, + crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) ctrl.Result { + log := ctrl.LoggerFrom(ctx) + + log.Info("Handling HA change request") + + action := "enable-ha" + if !crunchybridgecluster.Spec.IsHA { + action = "disable-ha" + } + + clusterUpgrade, err := r.NewClient().UpgradeClusterHA(ctx, apiKey, crunchybridgecluster.Status.ID, action) + if err != nil { + // TODO(crunchybridgecluster): consider what errors we might get + // and what different results/requeue times we want to return. + // Currently: don't requeue and wait for user to change spec. 
+ meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionFalse, + Reason: "UpgradeError", + Message: fmt.Sprintf( + "Error performing an HA upgrade: %s", err), + }) + log.Error(err, "Error while attempting cluster HA change") + return ctrl.Result{} + } + clusterUpgrade.AddDataToClusterStatus(crunchybridgecluster) + if len(clusterUpgrade.Operations) != 0 { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionTrue, + Reason: clusterUpgrade.Operations[0].Flavor, + Message: fmt.Sprintf( + "Performing an upgrade of type %v with a state of %v.", + clusterUpgrade.Operations[0].Flavor, clusterUpgrade.Operations[0].State), + }) + } + + return runtime.RequeueWithoutBackoff(3 * time.Minute) +} + +// handleUpdate handles upgrades that hit the "PATCH /clusters/" endpoint +func (r *CrunchyBridgeClusterReconciler) handleUpdate(ctx context.Context, + apiKey string, + crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) ctrl.Result { + log := ctrl.LoggerFrom(ctx) + + log.Info("Handling update request") + + updateRequest := &bridge.PatchClustersRequestPayload{ + IsProtected: &crunchybridgecluster.Spec.IsProtected, + Name: crunchybridgecluster.Spec.ClusterName, + } + + clusterUpdate, err := r.NewClient().UpdateCluster(ctx, apiKey, + crunchybridgecluster.Status.ID, updateRequest) + if err != nil { + // TODO(crunchybridgecluster): consider what errors we might get + // and what different results/requeue times we want to return. + // Currently: don't requeue and wait for user to change spec. 
+ meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionFalse, + Reason: "UpgradeError", + Message: fmt.Sprintf( + "Error performing an upgrade: %s", err), + }) + log.Error(err, "Error while attempting cluster update") + return ctrl.Result{} + } + clusterUpdate.AddDataToClusterStatus(crunchybridgecluster) + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionTrue, + Reason: "ClusterUpgrade", + Message: fmt.Sprintf( + "An upgrade is occurring, the clusters name is %v and the cluster is protected is %v.", + clusterUpdate.ClusterName, *clusterUpdate.IsProtected), + }) + + return runtime.RequeueWithoutBackoff(3 * time.Minute) +} + +// GetSecretKeys gets the secret and returns the expected API key and team id +// or an error if either of those fields or the Secret are missing +func (r *CrunchyBridgeClusterReconciler) GetSecretKeys( + ctx context.Context, crunchyBridgeCluster *v1beta1.CrunchyBridgeCluster, +) (string, string, error) { + + existing := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Namespace: crunchyBridgeCluster.GetNamespace(), + Name: crunchyBridgeCluster.Spec.Secret, + }} + + err := errors.WithStack( + r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + + if err == nil { + if existing.Data["key"] != nil && existing.Data["team"] != nil { + return string(existing.Data["key"]), string(existing.Data["team"]), nil + } + err = fmt.Errorf("error handling secret; expected to find a key and a team: found key %t, found team %t", + existing.Data["key"] != nil, + existing.Data["team"] != nil) + } + + return "", "", err +} + +// deleteControlled safely deletes object when it is controlled by cluster. +func (r *CrunchyBridgeClusterReconciler) deleteControlled( + ctx context.Context, crunchyBridgeCluster *v1beta1.CrunchyBridgeCluster, object client.Object, +) error { + if metav1.IsControlledBy(object, crunchyBridgeCluster) { + uid := object.GetUID() + version := object.GetResourceVersion() + exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} + + return r.Client.Delete(ctx, object, exactly) + } + + return nil +} diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go new file mode 100644 index 0000000000..92d6b58d0e --- /dev/null +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go @@ -0,0 +1,834 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "strings" + "testing" + "time" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +var testTeamId = "5678" +var testApiKey = "9012" + +func TestReconcileBridgeConnectionSecret(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + + ns := setupNamespace(t, tClient).Name + cluster := testCluster() + cluster.Namespace = ns + + t.Run("Failure", func(t *testing.T) { + key, team, err := reconciler.reconcileBridgeConnectionSecret(ctx, cluster) + assert.Equal(t, key, "") + assert.Equal(t, team, "") + assert.Check(t, err != nil) + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "SecretInvalid") + assert.Check(t, cmp.Contains(readyCondition.Message, + "The condition of the cluster is unknown because the secret is invalid:")) + } + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, upgradingCondition.Reason, "SecretInvalid") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "The condition of the upgrade(s) is unknown because the secret is invalid:")) + } + }) + + t.Run("ValidSecretFound", func(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "crunchy-bridge-api-key", + Namespace: ns, + }, + Data: map[string][]byte{ + "key": []byte(`asdf`), + "team": []byte(`jkl;`), + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + key, team, err := reconciler.reconcileBridgeConnectionSecret(ctx, cluster) + assert.Equal(t, key, "asdf") + assert.Equal(t, team, "jkl;") + assert.NilError(t, err) + }) +} + +func TestHandleDuplicateClusterName(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + clusterInBridge := testClusterApiResource() + clusterInBridge.ClusterName = "bridge-cluster-1" // originally "hippo-cluster" + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{clusterInBridge}, + } + } + + ns := setupNamespace(t, tClient).Name + + t.Run("FailureToListClusters", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = 
ns + + controllerResult, err := reconciler.handleDuplicateClusterName(ctx, "bad_api_key", testTeamId, cluster) + assert.Check(t, err != nil) + assert.Equal(t, *controllerResult, ctrl.Result{}) + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "UnknownClusterState") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Issue listing existing clusters in Bridge:")) + } + }) + + t.Run("NoDuplicateFound", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + + controllerResult, err := reconciler.handleDuplicateClusterName(ctx, testApiKey, testTeamId, cluster) + assert.NilError(t, err) + assert.Check(t, controllerResult == nil) + }) + + t.Run("DuplicateFoundAdoptionAnnotationNotPresent", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Spec.ClusterName = "bridge-cluster-1" // originally "hippo-cluster" + + controllerResult, err := reconciler.handleDuplicateClusterName(ctx, testApiKey, testTeamId, cluster) + assert.NilError(t, err) + assert.Equal(t, *controllerResult, ctrl.Result{}) + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionFalse) + assert.Equal(t, readyCondition.Reason, "DuplicateClusterName") + assert.Check(t, cmp.Contains(readyCondition.Message, + "A cluster with the same name already exists for this team (Team ID: ")) + } + }) + + t.Run("DuplicateFoundAdoptionAnnotationPresent", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Spec.ClusterName = "bridge-cluster-1" // originally "hippo-cluster" + cluster.Annotations = map[string]string{} + cluster.Annotations[naming.CrunchyBridgeClusterAdoptionAnnotation] = "1234" + + controllerResult, err := reconciler.handleDuplicateClusterName(ctx, testApiKey, testTeamId, cluster) + assert.NilError(t, err) + assert.Equal(t, *controllerResult, ctrl.Result{Requeue: true}) + assert.Equal(t, cluster.Status.ID, "1234") + }) +} + +func TestHandleCreateCluster(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{}, + } + } + + t.Run("SuccessfulCreate", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + + controllerResult := reconciler.handleCreateCluster(ctx, testApiKey, testTeamId, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + assert.Equal(t, cluster.Status.ID, "0") + + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "UnknownClusterState") + assert.Check(t, cmp.Contains(readyCondition.Message, + "The condition of the cluster is unknown.")) + } + + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) 
{ + assert.Equal(t, upgradingCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, upgradingCondition.Reason, "UnknownUpgradeState") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "The condition of the upgrade(s) is unknown.")) + } + }) + + t.Run("UnsuccessfulCreate", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + + controllerResult := reconciler.handleCreateCluster(ctx, "bad_api_key", testTeamId, cluster) + assert.Equal(t, controllerResult, ctrl.Result{}) + assert.Equal(t, cluster.Status.ID, "") + + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionFalse) + assert.Equal(t, readyCondition.Reason, "ClusterInvalid") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Cannot create from spec:")) + } + + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + assert.Check(t, upgradingCondition == nil) + }) +} + +func TestHandleGetCluster(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + firstClusterInBridge := testClusterApiResource() + secondClusterInBridge := testClusterApiResource() + secondClusterInBridge.ID = "2345" // originally "1234" + secondClusterInBridge.ClusterName = "hippo-cluster-2" // originally "hippo-cluster" + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{firstClusterInBridge, secondClusterInBridge}, + } + } + + t.Run("SuccessfulGet", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + + err := reconciler.handleGetCluster(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, cluster.Status.ClusterName, firstClusterInBridge.ClusterName) + assert.Equal(t, cluster.Status.Host, firstClusterInBridge.Host) + assert.Equal(t, cluster.Status.ID, firstClusterInBridge.ID) + assert.Equal(t, cluster.Status.IsHA, firstClusterInBridge.IsHA) + assert.Equal(t, cluster.Status.IsProtected, firstClusterInBridge.IsProtected) + assert.Equal(t, cluster.Status.MajorVersion, firstClusterInBridge.MajorVersion) + assert.Equal(t, cluster.Status.Plan, firstClusterInBridge.Plan) + assert.Equal(t, *cluster.Status.Storage, *bridge.FromGibibytes(firstClusterInBridge.Storage)) + }) + + t.Run("UnsuccessfulGet", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "bad_cluster_id" + + err := reconciler.handleGetCluster(ctx, testApiKey, cluster) + assert.Check(t, err != nil) + + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "UnknownClusterState") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Issue getting cluster information from Bridge:")) + } + }) +} + +func TestHandleGetClusterStatus(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + readyClusterId := "1234" + creatingClusterId := "7890" + 
readyClusterStatusInBridge := testClusterStatusApiResource(readyClusterId) + creatingClusterStatusInBridge := testClusterStatusApiResource(creatingClusterId) + creatingClusterStatusInBridge.State = "creating" // originally "ready" + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + ClusterStatuses: map[string]*bridge.ClusterStatusApiResource{ + readyClusterId: readyClusterStatusInBridge, + creatingClusterId: creatingClusterStatusInBridge, + }, + } + } + + t.Run("SuccessReadyState", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = readyClusterId + + err := reconciler.handleGetClusterStatus(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, cluster.Status.State, "ready") + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionTrue) + assert.Equal(t, readyCondition.Reason, "ready") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Bridge cluster state is ready")) + } + }) + + t.Run("SuccessNonReadyState", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = creatingClusterId + + err := reconciler.handleGetClusterStatus(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, cluster.Status.State, "creating") + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionFalse) + assert.Equal(t, readyCondition.Reason, "creating") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Bridge cluster state is creating")) + } + }) + + t.Run("UnsuccessfulGet", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = creatingClusterId + + err := reconciler.handleGetClusterStatus(ctx, "bad_api_key", cluster) + assert.Check(t, err != nil) + assert.Equal(t, cluster.Status.State, "unknown") + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "UnknownClusterState") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Issue getting cluster status from Bridge:")) + } + }) +} + +func TestHandleGetClusterUpgrade(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + upgradingClusterId := "1234" + notUpgradingClusterId := "7890" + upgradingClusterUpgradeInBridge := testClusterUpgradeApiResource(upgradingClusterId) + notUpgradingClusterUpgradeInBridge := testClusterUpgradeApiResource(notUpgradingClusterId) + notUpgradingClusterUpgradeInBridge.Operations = []*v1beta1.UpgradeOperation{} + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + ClusterUpgrades: map[string]*bridge.ClusterUpgradeApiResource{ + upgradingClusterId: upgradingClusterUpgradeInBridge, + notUpgradingClusterId: 
notUpgradingClusterUpgradeInBridge, + }, + } + } + + t.Run("SuccessUpgrading", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = upgradingClusterId + + err := reconciler.handleGetClusterUpgrade(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "resize", + StartingFrom: "", + State: "in_progress", + }) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "resize") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type resize with a state of in_progress.")) + } + }) + + t.Run("SuccessNotUpgrading", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = notUpgradingClusterId + + err := reconciler.handleGetClusterUpgrade(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, len(cluster.Status.OngoingUpgrade), 0) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionFalse) + assert.Equal(t, upgradingCondition.Reason, "NoUpgradesInProgress") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "No upgrades being performed")) + } + }) + + t.Run("UnsuccessfulGet", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = notUpgradingClusterId + + err := reconciler.handleGetClusterUpgrade(ctx, "bad_api_key", cluster) + assert.Check(t, err != nil) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, upgradingCondition.Reason, "UnknownUpgradeState") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Issue getting cluster upgrade from Bridge:")) + } + }) +} + +func TestHandleUpgrade(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + clusterInBridge := testClusterApiResource() + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{clusterInBridge}, + } + } + + t.Run("UpgradePlan", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.Plan = "standard-16" // originally "standard-8" + + controllerResult := reconciler.handleUpgrade(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "maintenance") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type maintenance with a state of in_progress.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], 
v1beta1.UpgradeOperation{ + Flavor: "maintenance", + StartingFrom: "", + State: "in_progress", + }) + } + }) + + t.Run("UpgradePostgres", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.PostgresVersion = 16 // originally "15" + + controllerResult := reconciler.handleUpgrade(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "major_version_upgrade") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type major_version_upgrade with a state of in_progress.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "major_version_upgrade", + StartingFrom: "", + State: "in_progress", + }) + } + }) + + t.Run("UpgradeStorage", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.Storage = resource.MustParse("15Gi") // originally "10Gi" + + controllerResult := reconciler.handleUpgrade(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "resize") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type resize with a state of in_progress.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "resize", + StartingFrom: "", + State: "in_progress", + }) + } + }) + + t.Run("UpgradeFailure", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.Storage = resource.MustParse("15Gi") // originally "10Gi" + + controllerResult := reconciler.handleUpgrade(ctx, "bad_api_key", cluster) + assert.Equal(t, controllerResult, ctrl.Result{}) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionFalse) + assert.Equal(t, upgradingCondition.Reason, "UpgradeError") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Error performing an upgrade: boom")) + } + }) +} + +func TestHandleUpgradeHA(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + clusterInBridgeWithHaDisabled := testClusterApiResource() + clusterInBridgeWithHaEnabled := testClusterApiResource() + clusterInBridgeWithHaEnabled.ID = "2345" // originally "1234" + clusterInBridgeWithHaEnabled.IsHA = initialize.Bool(true) // originally "false" + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{clusterInBridgeWithHaDisabled, + clusterInBridgeWithHaEnabled}, + } + } + + t.Run("EnableHA", func(t *testing.T) { + cluster := testCluster() + 
cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.IsHA = true // originally "false" + + controllerResult := reconciler.handleUpgradeHA(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "ha_change") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type ha_change with a state of enabling_ha.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "ha_change", + StartingFrom: "", + State: "enabling_ha", + }) + } + }) + + t.Run("DisableHA", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "2345" + + controllerResult := reconciler.handleUpgradeHA(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "ha_change") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type ha_change with a state of disabling_ha.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "ha_change", + StartingFrom: "", + State: "disabling_ha", + }) + } + }) + + t.Run("UpgradeFailure", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + + controllerResult := reconciler.handleUpgradeHA(ctx, "bad_api_key", cluster) + assert.Equal(t, controllerResult, ctrl.Result{}) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionFalse) + assert.Equal(t, upgradingCondition.Reason, "UpgradeError") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Error performing an HA upgrade: boom")) + } + }) +} + +func TestHandleUpdate(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + clusterInBridge := testClusterApiResource() + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{clusterInBridge}, + } + } + + t.Run("UpdateName", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.ClusterName = "new-cluster-name" // originally "hippo-cluster" + + controllerResult := reconciler.handleUpdate(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "ClusterUpgrade") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "An upgrade is 
occurring, the clusters name is new-cluster-name and the cluster is protected is false.")) + } + assert.Equal(t, cluster.Status.ClusterName, "new-cluster-name") + }) + + t.Run("UpdateIsProtected", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.IsProtected = true // originally "false" + + controllerResult := reconciler.handleUpdate(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "ClusterUpgrade") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "An upgrade is occurring, the clusters name is hippo-cluster and the cluster is protected is true.")) + } + assert.Equal(t, *cluster.Status.IsProtected, true) + }) + + t.Run("UpgradeFailure", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.IsProtected = true // originally "false" + + controllerResult := reconciler.handleUpdate(ctx, "bad_api_key", cluster) + assert.Equal(t, controllerResult, ctrl.Result{}) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionFalse) + assert.Equal(t, upgradingCondition.Reason, "UpgradeError") + assert.Check(t, cmp.Contains(upgradingCondition.Message, "Error performing an upgrade: boom")) + } + }) +} + +func TestGetSecretKeys(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + + ns := setupNamespace(t, tClient).Name + cluster := testCluster() + cluster.Namespace = ns + + t.Run("NoSecret", func(t *testing.T) { + apiKey, team, err := reconciler.GetSecretKeys(ctx, cluster) + assert.Equal(t, apiKey, "") + assert.Equal(t, team, "") + assert.ErrorContains(t, err, "secrets \"crunchy-bridge-api-key\" not found") + }) + + t.Run("SecretMissingApiKey", func(t *testing.T) { + cluster.Spec.Secret = "secret-missing-api-key" // originally "crunchy-bridge-api-key" + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret-missing-api-key", + Namespace: ns, + }, + Data: map[string][]byte{ + "team": []byte(`jkl;`), + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + apiKey, team, err := reconciler.GetSecretKeys(ctx, cluster) + assert.Equal(t, apiKey, "") + assert.Equal(t, team, "") + assert.ErrorContains(t, err, "error handling secret; expected to find a key and a team: found key false, found team true") + + assert.NilError(t, tClient.Delete(ctx, secret)) + }) + + t.Run("SecretMissingTeamId", func(t *testing.T) { + cluster.Spec.Secret = "secret-missing-team-id" + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret-missing-team-id", + Namespace: ns, + }, + Data: map[string][]byte{ + "key": []byte(`asdf`), + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + apiKey, team, err := reconciler.GetSecretKeys(ctx, cluster) + assert.Equal(t, apiKey, "") + assert.Equal(t, team, "") + assert.ErrorContains(t, err, "error handling secret; expected to find a key and a team: found key true, found team 
false") + }) + + t.Run("GoodSecret", func(t *testing.T) { + cluster.Spec.Secret = "crunchy-bridge-api-key" + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "crunchy-bridge-api-key", + Namespace: ns, + }, + Data: map[string][]byte{ + "key": []byte(`asdf`), + "team": []byte(`jkl;`), + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + apiKey, team, err := reconciler.GetSecretKeys(ctx, cluster) + assert.Equal(t, apiKey, "asdf") + assert.Equal(t, team, "jkl;") + assert.NilError(t, err) + }) +} + +func TestDeleteControlled(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + ns := setupNamespace(t, tClient) + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Name = strings.ToLower(t.Name()) // originally "hippo-cr" + assert.NilError(t, tClient.Create(ctx, cluster)) + + t.Run("NotControlled", func(t *testing.T) { + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "solo" + + assert.NilError(t, tClient.Create(ctx, secret)) + + // No-op when there's no ownership + assert.NilError(t, reconciler.deleteControlled(ctx, cluster, secret)) + assert.NilError(t, tClient.Get(ctx, client.ObjectKeyFromObject(secret), secret)) + }) + + t.Run("Controlled", func(t *testing.T) { + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "controlled" + + assert.NilError(t, reconciler.setControllerReference(cluster, secret)) + assert.NilError(t, tClient.Create(ctx, secret)) + + // Deletes when controlled by cluster. + assert.NilError(t, reconciler.deleteControlled(ctx, cluster, secret)) + + err := tClient.Get(ctx, client.ObjectKeyFromObject(secret), secret) + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %#v", err) + }) +} diff --git a/internal/bridge/crunchybridgecluster/delete.go b/internal/bridge/crunchybridgecluster/delete.go new file mode 100644 index 0000000000..8dcada31cf --- /dev/null +++ b/internal/bridge/crunchybridgecluster/delete.go @@ -0,0 +1,70 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "time" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +const finalizer = "crunchybridgecluster.postgres-operator.crunchydata.com/finalizer" + +// handleDelete sets a finalizer on cluster and performs the finalization of +// cluster when it is being deleted. It returns (nil, nil) when cluster is +// not being deleted and there are no errors patching the CrunchyBridgeCluster. +// The caller is responsible for returning other values to controller-runtime. 
+func (r *CrunchyBridgeClusterReconciler) handleDelete( + ctx context.Context, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, key string, +) (*ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + + // If the CrunchyBridgeCluster isn't being deleted, add the finalizer + if crunchybridgecluster.ObjectMeta.DeletionTimestamp.IsZero() { + if !controllerutil.ContainsFinalizer(crunchybridgecluster, finalizer) { + controllerutil.AddFinalizer(crunchybridgecluster, finalizer) + if err := r.Update(ctx, crunchybridgecluster); err != nil { + return nil, err + } + } + // If the CrunchyBridgeCluster is being deleted, + // handle the deletion, and remove the finalizer + } else { + if controllerutil.ContainsFinalizer(crunchybridgecluster, finalizer) { + log.Info("deleting cluster", "clusterName", crunchybridgecluster.Spec.ClusterName) + + // TODO(crunchybridgecluster): If is_protected is true, maybe skip this call, but allow the deletion of the K8s object? + _, deletedAlready, err := r.NewClient().DeleteCluster(ctx, key, crunchybridgecluster.Status.ID) + // Requeue if error + if err != nil { + return &ctrl.Result{}, err + } + + if !deletedAlready { + return initialize.Pointer(runtime.RequeueWithoutBackoff(time.Second)), err + } + + // Remove finalizer if deleted already + if deletedAlready { + log.Info("cluster deleted", "clusterName", crunchybridgecluster.Spec.ClusterName) + + controllerutil.RemoveFinalizer(crunchybridgecluster, finalizer) + if err := r.Update(ctx, crunchybridgecluster); err != nil { + return &ctrl.Result{}, err + } + } + } + // Stop reconciliation as the item is being deleted + return &ctrl.Result{}, nil + } + + return nil, nil +} diff --git a/internal/bridge/crunchybridgecluster/delete_test.go b/internal/bridge/crunchybridgecluster/delete_test.go new file mode 100644 index 0000000000..28e6feb1f8 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/delete_test.go @@ -0,0 +1,133 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "testing" + "time" + + "gotest.tools/v3/assert" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/testing/require" +) + +func TestHandleDeleteCluster(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + + firstClusterInBridge := testClusterApiResource() + firstClusterInBridge.ClusterName = "bridge-cluster-1" + secondClusterInBridge := testClusterApiResource() + secondClusterInBridge.ClusterName = "bridge-cluster-2" + secondClusterInBridge.ID = "2345" + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + testBridgeClient := &TestBridgeClient{ + ApiKey: "9012", + TeamId: "5678", + Clusters: []*bridge.ClusterApiResource{firstClusterInBridge, secondClusterInBridge}, + } + reconciler.NewClient = func() bridge.ClientInterface { + return testBridgeClient + } + + t.Run("SuccessfulDeletion", func(t *testing.T) { + // Create test cluster in kubernetes + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.ClusterName = "bridge-cluster-1" + assert.NilError(t, tClient.Create(ctx, cluster)) + + // Run handleDelete + controllerResult, err := reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + assert.Check(t, controllerResult == nil) + + // Make sure that finalizer was added + assert.Check(t, controllerutil.ContainsFinalizer(cluster, finalizer)) + + // Send delete request to kubernetes + assert.NilError(t, tClient.Delete(ctx, cluster)) + + // Get cluster from kubernetes and assert that the deletion timestamp was added + assert.NilError(t, tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)) + assert.Check(t, !cluster.ObjectMeta.DeletionTimestamp.IsZero()) + + // Note: We must run handleDelete multiple times because we don't want to remove the + // finalizer until we're sure that the cluster has been deleted from Bridge, so we + // have to do multiple calls/reconcile loops. 
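The multi-pass flow described in that note is a small state machine; a toy sketch of it — plain Go, no Kubernetes types, all names illustrative:

```go
package main

import "fmt"

// step models one reconcile pass of the pattern exercised by this test:
// pass 1 adds the finalizer, pass 2 deletes the remote cluster and
// requeues, pass 3 removes the finalizer so Kubernetes can finish.
func step(deleting, hasFinalizer, remoteDeleted bool) string {
	switch {
	case !deleting:
		return "ensure finalizer"
	case hasFinalizer && !remoteDeleted:
		return "delete from Bridge, requeue"
	case hasFinalizer:
		return "remove finalizer"
	default:
		return "nothing to do"
	}
}

func main() {
	fmt.Println(step(false, false, false)) // pass 1
	fmt.Println(step(true, true, false))   // pass 2
	fmt.Println(step(true, true, true))    // pass 3
}
```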
+ // Run handleDelete again to delete from Bridge + cluster.Status.ID = "1234" + controllerResult, err = reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + assert.Equal(t, controllerResult.RequeueAfter, 1*time.Second) + assert.Equal(t, len(testBridgeClient.Clusters), 1) + assert.Equal(t, testBridgeClient.Clusters[0].ClusterName, "bridge-cluster-2") + + // Run handleDelete one last time to remove finalizer + controllerResult, err = reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + assert.Equal(t, *controllerResult, ctrl.Result{}) + + // Make sure that finalizer was removed + assert.Check(t, !controllerutil.ContainsFinalizer(cluster, finalizer)) + }) + + t.Run("UnsuccessfulDeletion", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "2345" + cluster.Spec.ClusterName = "bridge-cluster-2" + assert.NilError(t, tClient.Create(ctx, cluster)) + + // Run handleDelete + controllerResult, err := reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + assert.Check(t, controllerResult == nil) + + // Make sure that finalizer was added + assert.Check(t, controllerutil.ContainsFinalizer(cluster, finalizer)) + + // Send delete request to kubernetes + assert.NilError(t, tClient.Delete(ctx, cluster)) + + // Get cluster from kubernetes and assert that the deletion timestamp was added + assert.NilError(t, tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)) + assert.Check(t, !cluster.ObjectMeta.DeletionTimestamp.IsZero()) + + // Run handleDelete again to attempt to delete from Bridge, but provide a bad API key + cluster.Status.ID = "2345" + controllerResult, err = reconciler.handleDelete(ctx, cluster, "bad_api_key") + assert.ErrorContains(t, err, "boom") + assert.Equal(t, *controllerResult, ctrl.Result{}) + + // Run handleDelete a couple of times with a good API key so the test can clean up properly. + // Note: We must run handleDelete multiple times because we don't want to remove the + // finalizer until we're sure that the cluster has been deleted from Bridge, so we + // have to do multiple calls/reconcile loops. + // Delete from Bridge + _, err = reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + + // Remove finalizer + _, err = reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + + // Make sure that finalizer was removed + assert.Check(t, !controllerutil.ContainsFinalizer(cluster, finalizer)) + }) +} diff --git a/internal/bridge/crunchybridgecluster/helpers_test.go b/internal/bridge/crunchybridgecluster/helpers_test.go new file mode 100644 index 0000000000..f40ad3d054 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/helpers_test.go @@ -0,0 +1,178 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "os" + "strconv" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// Scale extends d according to PGO_TEST_TIMEOUT_SCALE.
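The Scale helper declared next multiplies test durations by PGO_TEST_TIMEOUT_SCALE. A standalone sketch of the same parse-and-scale logic, simplified to return the identity function instead of panicking on invalid input:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// scaleFromEnv mirrors the init logic that follows, for one setting value.
func scaleFromEnv(setting string) func(time.Duration) time.Duration {
	factor, _ := strconv.ParseFloat(setting, 64)
	if setting == "" || factor <= 0 {
		return func(d time.Duration) time.Duration { return d }
	}
	return func(d time.Duration) time.Duration {
		return time.Duration(factor * float64(d))
	}
}

func main() {
	scale := scaleFromEnv("2.5")
	fmt.Println(scale(10 * time.Second)) // 25s
}
```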
+var Scale = func(d time.Duration) time.Duration { return d } + +// This function was duplicated from the postgrescluster package. +// TODO: Pull these duplicated functions out into a separate, shared package. +func init() { + setting := os.Getenv("PGO_TEST_TIMEOUT_SCALE") + factor, _ := strconv.ParseFloat(setting, 64) + + if setting != "" { + if factor <= 0 { + panic("PGO_TEST_TIMEOUT_SCALE must be a fractional number greater than zero") + } + + Scale = func(d time.Duration) time.Duration { + return time.Duration(factor * float64(d)) + } + } +} + +// setupKubernetes starts or connects to a Kubernetes API and returns a client +// that uses it. See [require.Kubernetes] for more details. +func setupKubernetes(t testing.TB) client.Client { + t.Helper() + + // Start and/or connect to a Kubernetes API, or Skip when that's not configured. + cc := require.Kubernetes(t) + + // Log the status of any test namespaces after this test fails. + t.Cleanup(func() { + if t.Failed() { + var namespaces corev1.NamespaceList + _ = cc.List(context.Background(), &namespaces, client.HasLabels{"postgres-operator-test"}) + + type shaped map[string]corev1.NamespaceStatus + result := make([]shaped, len(namespaces.Items)) + + for i, ns := range namespaces.Items { + result[i] = shaped{ns.Labels["postgres-operator-test"]: ns.Status} + } + + formatted, _ := yaml.Marshal(result) + t.Logf("Test Namespaces:\n%s", formatted) + } + }) + + return cc +} + +// setupNamespace creates a random namespace that will be deleted by t.Cleanup. +// +// Deprecated: Use [require.Namespace] instead. +func setupNamespace(t testing.TB, cc client.Client) *corev1.Namespace { + t.Helper() + return require.Namespace(t, cc) +} + +// testCluster defines a base cluster spec that can be used by tests to +// generate a CrunchyBridgeCluster CR +func testCluster() *v1beta1.CrunchyBridgeCluster { + cluster := v1beta1.CrunchyBridgeCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hippo-cr", + }, + Spec: v1beta1.CrunchyBridgeClusterSpec{ + ClusterName: "hippo-cluster", + IsHA: false, + PostgresVersion: 15, + Plan: "standard-8", + Provider: "aws", + Region: "us-east-2", + Secret: "crunchy-bridge-api-key", + Storage: resource.MustParse("10Gi"), + }, + } + return cluster.DeepCopy() +} + +func testClusterApiResource() *bridge.ClusterApiResource { + cluster := bridge.ClusterApiResource{ + ID: "1234", + Host: "example.com", + IsHA: initialize.Bool(false), + IsProtected: initialize.Bool(false), + MajorVersion: 15, + ClusterName: "hippo-cluster", + Plan: "standard-8", + Provider: "aws", + Region: "us-east-2", + Storage: 10, + Team: "5678", + } + return &cluster +} + +func testClusterStatusApiResource(clusterId string) *bridge.ClusterStatusApiResource { + teamId := "5678" + state := "ready" + + clusterStatus := bridge.ClusterStatusApiResource{ + DiskUsage: &bridge.ClusterDiskUsageApiResource{ + DiskAvailableMB: 16, + DiskTotalSizeMB: 16, + DiskUsedMB: 0, + }, + OldestBackup: "oldbackup", + OngoingUpgrade: &bridge.ClusterUpgradeApiResource{ + ClusterID: clusterId, + Operations: []*v1beta1.UpgradeOperation{}, + Team: teamId, + }, + State: state, + } + + return &clusterStatus +} + +func testClusterUpgradeApiResource(clusterId string) *bridge.ClusterUpgradeApiResource { + teamId := "5678" + + clusterUpgrade := bridge.ClusterUpgradeApiResource{ + ClusterID: clusterId, + Operations: []*v1beta1.UpgradeOperation{ + { + Flavor: "resize", + StartingFrom: "", + State: "in_progress", + }, + }, + Team: teamId, + } + + return &clusterUpgrade +} + +func 
testClusterRoleApiResource() *bridge.ClusterRoleApiResource { + clusterId := "1234" + teamId := "5678" + roleName := "application" + + clusterRole := bridge.ClusterRoleApiResource{ + AccountEmail: "test@email.com", + AccountId: "12345678", + ClusterId: clusterId, + Flavor: "chocolate", + Name: roleName, + Password: "application-password", + Team: teamId, + URI: "connection-string", + } + + return &clusterRole +} diff --git a/internal/bridge/crunchybridgecluster/mock_bridge_api.go b/internal/bridge/crunchybridgecluster/mock_bridge_api.go new file mode 100644 index 0000000000..5c6b243714 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/mock_bridge_api.go @@ -0,0 +1,247 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/initialize" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +type TestBridgeClient struct { + ApiKey string `json:"apiKey,omitempty"` + TeamId string `json:"teamId,omitempty"` + Clusters []*bridge.ClusterApiResource `json:"clusters,omitempty"` + ClusterRoles []*bridge.ClusterRoleApiResource `json:"clusterRoles,omitempty"` + ClusterStatuses map[string]*bridge.ClusterStatusApiResource `json:"clusterStatuses,omitempty"` + ClusterUpgrades map[string]*bridge.ClusterUpgradeApiResource `json:"clusterUpgrades,omitempty"` +} + +func (tbc *TestBridgeClient) ListClusters(ctx context.Context, apiKey, teamId string) ([]*bridge.ClusterApiResource, error) { + + if apiKey == tbc.ApiKey && teamId == tbc.TeamId { + return tbc.Clusters, nil + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) UpgradeCluster(ctx context.Context, apiKey, id string, clusterRequestPayload *bridge.PostClustersUpgradeRequestPayload, +) (*bridge.ClusterUpgradeApiResource, error) { + // look for cluster + var desiredCluster *bridge.ClusterApiResource + clusterFound := false + for _, cluster := range tbc.Clusters { + if cluster.ID == id { + desiredCluster = cluster + clusterFound = true + } + } + if !clusterFound { + return nil, errors.New("cluster not found") + } + + // happy path + if apiKey == tbc.ApiKey { + result := &bridge.ClusterUpgradeApiResource{ + ClusterID: id, + Team: tbc.TeamId, + } + if clusterRequestPayload.Plan != desiredCluster.Plan { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "maintenance", + StartingFrom: "", + State: "in_progress", + }, + } + } else if clusterRequestPayload.PostgresVersion != intstr.FromInt(desiredCluster.MajorVersion) { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "major_version_upgrade", + StartingFrom: "", + State: "in_progress", + }, + } + } else if clusterRequestPayload.Storage != desiredCluster.Storage { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "resize", + StartingFrom: "", + State: "in_progress", + }, + } + } + return result, nil + } + // sad path + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) UpgradeClusterHA(ctx context.Context, apiKey, id, action string, +) (*bridge.ClusterUpgradeApiResource, error) { + // look for cluster + var desiredCluster *bridge.ClusterApiResource + clusterFound := false + for _, cluster := range tbc.Clusters { + if cluster.ID == id { + desiredCluster = cluster + clusterFound = true + } + } + if 
!clusterFound { + return nil, errors.New("cluster not found") + } + + // happy path + if apiKey == tbc.ApiKey { + result := &bridge.ClusterUpgradeApiResource{ + ClusterID: id, + Team: tbc.TeamId, + } + if action == "enable-ha" && !*desiredCluster.IsHA { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "ha_change", + StartingFrom: "", + State: "enabling_ha", + }, + } + } else if action == "disable-ha" && *desiredCluster.IsHA { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "ha_change", + StartingFrom: "", + State: "disabling_ha", + }, + } + } else { + return nil, errors.New("no change detected") + } + return result, nil + } + // sad path + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) UpdateCluster(ctx context.Context, apiKey, id string, clusterRequestPayload *bridge.PatchClustersRequestPayload, +) (*bridge.ClusterApiResource, error) { + // look for cluster + var desiredCluster *bridge.ClusterApiResource + clusterFound := false + for _, cluster := range tbc.Clusters { + if cluster.ID == id { + desiredCluster = cluster + clusterFound = true + } + } + if !clusterFound { + return nil, errors.New("cluster not found") + } + + // happy path + if apiKey == tbc.ApiKey { + desiredCluster.ClusterName = clusterRequestPayload.Name + desiredCluster.IsProtected = clusterRequestPayload.IsProtected + return desiredCluster, nil + } + // sad path + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) CreateCluster(ctx context.Context, apiKey string, + clusterRequestPayload *bridge.PostClustersRequestPayload) (*bridge.ClusterApiResource, error) { + + if apiKey == tbc.ApiKey && clusterRequestPayload.Team == tbc.TeamId && clusterRequestPayload.Name != "" && + clusterRequestPayload.Plan != "" { + cluster := &bridge.ClusterApiResource{ + ID: fmt.Sprint(len(tbc.Clusters)), + Host: "example.com", + IsHA: initialize.Bool(clusterRequestPayload.IsHA), + MajorVersion: clusterRequestPayload.PostgresVersion.IntValue(), + ClusterName: clusterRequestPayload.Name, + Plan: clusterRequestPayload.Plan, + Provider: clusterRequestPayload.Provider, + Region: clusterRequestPayload.Region, + Storage: clusterRequestPayload.Storage, + } + tbc.Clusters = append(tbc.Clusters, cluster) + + return cluster, nil + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) GetCluster(ctx context.Context, apiKey, id string) (*bridge.ClusterApiResource, error) { + + if apiKey == tbc.ApiKey { + for _, cluster := range tbc.Clusters { + if cluster.ID == id { + return cluster, nil + } + } + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) GetClusterStatus(ctx context.Context, apiKey, id string) (*bridge.ClusterStatusApiResource, error) { + + if apiKey == tbc.ApiKey { + return tbc.ClusterStatuses[id], nil + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) GetClusterUpgrade(ctx context.Context, apiKey, id string) (*bridge.ClusterUpgradeApiResource, error) { + + if apiKey == tbc.ApiKey { + return tbc.ClusterUpgrades[id], nil + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) GetClusterRole(ctx context.Context, apiKey, clusterId, roleName string) (*bridge.ClusterRoleApiResource, error) { + + if apiKey == tbc.ApiKey { + for _, clusterRole := range tbc.ClusterRoles { + if clusterRole.ClusterId == clusterId && clusterRole.Name == roleName { + return clusterRole, nil + } + } + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) DeleteCluster(ctx context.Context, apiKey, clusterId string) 
(*bridge.ClusterApiResource, bool, error) { + alreadyDeleted := true + var cluster *bridge.ClusterApiResource + + if apiKey == tbc.ApiKey { + for i := len(tbc.Clusters) - 1; i >= 0; i-- { + if tbc.Clusters[i].ID == clusterId { + cluster = tbc.Clusters[i] + alreadyDeleted = false + tbc.Clusters = append(tbc.Clusters[:i], tbc.Clusters[i+1:]...) + return cluster, alreadyDeleted, nil + } + } + } else { + return nil, alreadyDeleted, errors.New("boom") + } + + return nil, alreadyDeleted, nil +} diff --git a/internal/bridge/crunchybridgecluster/postgres.go b/internal/bridge/crunchybridgecluster/postgres.go new file mode 100644 index 0000000000..024631de67 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/postgres.go @@ -0,0 +1,164 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// generatePostgresRoleSecret returns a Secret containing a password and +// connection details for the appropriate database. +func (r *CrunchyBridgeClusterReconciler) generatePostgresRoleSecret( + cluster *v1beta1.CrunchyBridgeCluster, roleSpec *v1beta1.CrunchyBridgeClusterRoleSpec, + clusterRole *bridge.ClusterRoleApiResource, +) (*corev1.Secret, error) { + roleName := roleSpec.Name + secretName := roleSpec.SecretName + intent := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: secretName, + }} + intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) + intent.StringData = map[string]string{ + "name": clusterRole.Name, + "password": clusterRole.Password, + "uri": clusterRole.URI, + } + + intent.Annotations = cluster.Spec.Metadata.GetAnnotationsOrNil() + intent.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleCrunchyBridgeClusterPostgresRole, + naming.LabelCrunchyBridgeClusterPostgresRole: roleName, + }) + + err := errors.WithStack(r.setControllerReference(cluster, intent)) + + return intent, err +} + +// reconcilePostgresRoles writes the objects necessary to manage roles and their +// passwords in PostgreSQL. 
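generatePostgresRoleSecret above stores the Bridge role's credentials under three fixed StringData keys: name, password, and uri. A minimal sketch of a consumer reading those keys back out of a fetched Secret — the key names come from the code above, everything else is illustrative:

```go
package main

import "fmt"

// roleCredentials mirrors the three StringData keys written above.
type roleCredentials struct {
	Name, Password, URI string
}

// fromSecretData decodes the Data field of a fetched role Secret
// (Kubernetes merges StringData into the binary Data map on write).
func fromSecretData(data map[string][]byte) roleCredentials {
	return roleCredentials{
		Name:     string(data["name"]),
		Password: string(data["password"]),
		URI:      string(data["uri"]),
	}
}

func main() {
	data := map[string][]byte{
		"name":     []byte("application"),
		"password": []byte("password"),
		"uri":      []byte("postgres://application:password@example.com:5432/postgres"),
	}
	fmt.Printf("%+v\n", fromSecretData(data))
}
```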
+func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoles( + ctx context.Context, apiKey string, cluster *v1beta1.CrunchyBridgeCluster, +) error { + _, _, err := r.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + + // TODO: If we ever add a PgAdmin feature to CrunchyBridgeCluster, we will + // want to add the role credentials to PgAdmin here + + return err +} + +func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoleSecrets( + ctx context.Context, apiKey string, cluster *v1beta1.CrunchyBridgeCluster, +) ( + []*v1beta1.CrunchyBridgeClusterRoleSpec, map[string]*corev1.Secret, error, +) { + log := ctrl.LoggerFrom(ctx) + specRoles := cluster.Spec.Roles + + // Index role specifications by PostgreSQL role name and make sure that none of the + // secretNames are identical in the spec + secretNames := make(map[string]bool) + roleSpecs := make(map[string]*v1beta1.CrunchyBridgeClusterRoleSpec, len(specRoles)) + for i := range specRoles { + if secretNames[specRoles[i].SecretName] { + // Duplicate secretName found, return early with error + err := errors.New("Two or more of the Roles in the CrunchyBridgeCluster spec " + + "have the same SecretName. Role SecretNames must be unique.") + return nil, nil, err + } + secretNames[specRoles[i].SecretName] = true + + roleSpecs[specRoles[i].Name] = specRoles[i] + } + + // Make sure that this cluster's role secret names are not being used by any other + // secrets in the namespace + allSecretsInNamespace := &corev1.SecretList{} + err := errors.WithStack(r.Client.List(ctx, allSecretsInNamespace, client.InNamespace(cluster.Namespace))) + if err != nil { + return nil, nil, err + } + for _, secret := range allSecretsInNamespace.Items { + if secretNames[secret.Name] { + existingSecretLabels := secret.GetLabels() + if existingSecretLabels[naming.LabelCluster] != cluster.Name || + existingSecretLabels[naming.LabelRole] != naming.RoleCrunchyBridgeClusterPostgresRole { + err = errors.New( + fmt.Sprintf("There is already an existing Secret in this namespace with the name %v. "+ + "Please choose a different name for this role's Secret.", secret.Name), + ) + return nil, nil, err + } + } + } + + // Gather existing role secrets + secrets := &corev1.SecretList{} + selector, err := naming.AsSelector(naming.CrunchyBridgeClusterPostgresRoles(cluster.Name)) + if err == nil { + err = errors.WithStack( + r.Client.List(ctx, secrets, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selector}, + )) + } + + // Index secrets by PostgreSQL role name and delete any that are not in the + // cluster spec. + roleSecrets := make(map[string]*corev1.Secret, len(secrets.Items)) + if err == nil { + for i := range secrets.Items { + secret := &secrets.Items[i] + secretRoleName := secret.Labels[naming.LabelCrunchyBridgeClusterPostgresRole] + + roleSpec, specified := roleSpecs[secretRoleName] + if specified && roleSpec.SecretName == secret.Name { + roleSecrets[secretRoleName] = secret + } else if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, secret)) + } + } + } + + // Reconcile each PostgreSQL role in the cluster spec. + for roleName, role := range roleSpecs { + // Get ClusterRole from Bridge API + clusterRole, err := r.NewClient().GetClusterRole(ctx, apiKey, cluster.Status.ID, roleName) + // If issue with getting ClusterRole, log error and move on to next role + if err != nil { + // TODO (dsessler7): Emit event here? 
+ log.Error(err, "issue retrieving cluster role from Bridge") + continue + } + if err == nil { + roleSecrets[roleName], err = r.generatePostgresRoleSecret(cluster, role, clusterRole) + } + if err == nil { + err = errors.WithStack(r.apply(ctx, roleSecrets[roleName])) + } + if err != nil { + log.Error(err, "Issue creating role secret.") + } + } + + return specRoles, roleSecrets, err +} diff --git a/internal/bridge/crunchybridgecluster/postgres_test.go b/internal/bridge/crunchybridgecluster/postgres_test.go new file mode 100644 index 0000000000..66add7b789 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/postgres_test.go @@ -0,0 +1,239 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "testing" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestGeneratePostgresRoleSecret(t *testing.T) { + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + + cluster := testCluster() + cluster.Namespace = setupNamespace(t, tClient).Name + + spec := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "application-role-secret", + } + role := &bridge.ClusterRoleApiResource{ + Name: "application", + Password: "password", + URI: "postgres://application:password@example.com:5432/postgres", + } + t.Run("ObjectMeta", func(t *testing.T) { + secret, err := reconciler.generatePostgresRoleSecret(cluster, spec, role) + assert.NilError(t, err) + + if assert.Check(t, secret != nil) { + assert.Equal(t, secret.Namespace, cluster.Namespace) + assert.Assert(t, metav1.IsControlledBy(secret, cluster)) + assert.DeepEqual(t, secret.Labels, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "hippo-cr", + "postgres-operator.crunchydata.com/role": "cbc-pgrole", + "postgres-operator.crunchydata.com/cbc-pgrole": "application", + }) + } + }) + + t.Run("Data", func(t *testing.T) { + secret, err := reconciler.generatePostgresRoleSecret(cluster, spec, role) + assert.NilError(t, err) + + if assert.Check(t, secret != nil) { + assert.Equal(t, secret.StringData["name"], "application") + assert.Equal(t, secret.StringData["password"], "password") + assert.Equal(t, secret.StringData["uri"], + "postgres://application:password@example.com:5432/postgres") + } + }) +} + +func TestReconcilePostgresRoleSecrets(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + apiKey := "9012" + ns := setupNamespace(t, tClient).Name + + reconciler := &CrunchyBridgeClusterReconciler{ + Client: tClient, + Owner: "crunchybridgecluster-controller", + } + + t.Run("DuplicateSecretNameInSpec", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + + spec1 := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "role-secret", + } + spec2 := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "postgres", + SecretName: "role-secret", + } + cluster.Spec.Roles = append(cluster.Spec.Roles, spec1, spec2) + + 
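The reconcile call that follows should reject this spec: reconcilePostgresRoleSecrets (above) refuses duplicate SecretNames with a plain set-membership pass. A minimal standalone version of that check, with illustrative names:

```go
package main

import (
	"errors"
	"fmt"
)

// firstDuplicate reports the first repeated secret name, mirroring the
// map-based uniqueness check in reconcilePostgresRoleSecrets above.
func firstDuplicate(names []string) error {
	seen := make(map[string]bool, len(names))
	for _, n := range names {
		if seen[n] {
			return errors.New("duplicate SecretName: " + n)
		}
		seen[n] = true
	}
	return nil
}

func main() {
	fmt.Println(firstDuplicate([]string{"role-secret", "role-secret"}))
	fmt.Println(firstDuplicate([]string{"app-secret", "postgres-secret"})) // <nil>
}
```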
roleSpecSlice, secretMap, err := reconciler.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + assert.Check(t, roleSpecSlice == nil) + assert.Check(t, secretMap == nil) + assert.ErrorContains(t, err, "Two or more of the Roles in the CrunchyBridgeCluster spec have "+ + "the same SecretName. Role SecretNames must be unique.", "expected duplicate secret name error") + }) + + t.Run("DuplicateSecretNameInNamespace", func(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "role-secret", + Namespace: ns, + }, + StringData: map[string]string{ + "path": "stuff", + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + cluster := testCluster() + cluster.Namespace = ns + + spec1 := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "role-secret", + } + + cluster.Spec.Roles = append(cluster.Spec.Roles, spec1) + + roleSpecSlice, secretMap, err := reconciler.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + assert.Check(t, roleSpecSlice == nil) + assert.Check(t, secretMap == nil) + assert.ErrorContains(t, err, "There is already an existing Secret in this namespace with the name role-secret. "+ + "Please choose a different name for this role's Secret.", "expected duplicate secret name error") + }) + + t.Run("UnusedSecretsGetRemoved", func(t *testing.T) { + applicationRoleInBridge := testClusterRoleApiResource() + postgresRoleInBridge := testClusterRoleApiResource() + postgresRoleInBridge.Name = "postgres" + postgresRoleInBridge.Password = "postgres-password" + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: apiKey, + TeamId: "5678", + ClusterRoles: []*bridge.ClusterRoleApiResource{applicationRoleInBridge, postgresRoleInBridge}, + } + } + + applicationSpec := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "application-role-secret", + } + postgresSpec := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "postgres", + SecretName: "postgres-role-secret", + } + + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + // Add one role to cluster spec + cluster.Spec.Roles = append(cluster.Spec.Roles, applicationSpec) + assert.NilError(t, tClient.Create(ctx, cluster)) + + applicationRole := &bridge.ClusterRoleApiResource{ + Name: "application", + Password: "application-password", + URI: "connection-string", + } + postgresRole := &bridge.ClusterRoleApiResource{ + Name: "postgres", + Password: "postgres-password", + URI: "connection-string", + } + + // Generate secrets + applicationSecret, err := reconciler.generatePostgresRoleSecret(cluster, applicationSpec, applicationRole) + assert.NilError(t, err) + postgresSecret, err := reconciler.generatePostgresRoleSecret(cluster, postgresSpec, postgresRole) + assert.NilError(t, err) + + // Create secrets in k8s + assert.NilError(t, tClient.Create(ctx, applicationSecret)) + assert.NilError(t, tClient.Create(ctx, postgresSecret)) + + roleSpecSlice, secretMap, err := reconciler.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + assert.Check(t, roleSpecSlice != nil) + assert.Check(t, secretMap != nil) + assert.NilError(t, err) + + // Assert that postgresSecret was deleted since its associated role is not in the spec + err = tClient.Get(ctx, client.ObjectKeyFromObject(postgresSecret), postgresSecret) + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %#v", err) + + // Assert that applicationSecret is still there + err = tClient.Get(ctx, client.ObjectKeyFromObject(applicationSecret), 
applicationSecret) + assert.NilError(t, err) + }) + + t.Run("SecretsGetUpdated", func(t *testing.T) { + clusterRoleInBridge := testClusterRoleApiResource() + clusterRoleInBridge.Password = "different-password" + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: apiKey, + TeamId: "5678", + ClusterRoles: []*bridge.ClusterRoleApiResource{clusterRoleInBridge}, + } + } + + cluster := testCluster() + cluster.Namespace = ns + err := tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster) + assert.NilError(t, err) + cluster.Status.ID = "1234" + + spec1 := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "application-role-secret", + } + role1 := &bridge.ClusterRoleApiResource{ + Name: "application", + Password: "test", + URI: "connection-string", + } + // Generate secret + secret1, err := reconciler.generatePostgresRoleSecret(cluster, spec1, role1) + assert.NilError(t, err) + + roleSpecSlice, secretMap, err := reconciler.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + assert.Check(t, roleSpecSlice != nil) + assert.Check(t, secretMap != nil) + assert.NilError(t, err) + + // Assert that secret1 was updated + err = tClient.Get(ctx, client.ObjectKeyFromObject(secret1), secret1) + assert.NilError(t, err) + assert.Equal(t, string(secret1.Data["password"]), "different-password") + }) +} diff --git a/internal/bridge/crunchybridgecluster/watches.go b/internal/bridge/crunchybridgecluster/watches.go new file mode 100644 index 0000000000..79687b3476 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/watches.go @@ -0,0 +1,103 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// watchForRelatedSecret handles create/update/delete events for secrets, +// passing the Secret ObjectKey to findCrunchyBridgeClustersForSecret +func (r *CrunchyBridgeClusterReconciler) watchForRelatedSecret() handler.EventHandler { + handle := func(ctx context.Context, secret client.Object, q workqueue.RateLimitingInterface) { + key := client.ObjectKeyFromObject(secret) + + for _, cluster := range r.findCrunchyBridgeClustersForSecret(ctx, key) { + q.Add(ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(cluster), + }) + } + } + + return handler.Funcs{ + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.ObjectNew, q) + }, + // If the secret is deleted, we want to reconcile + // in order to emit an event/status about this problem. + // We will also emit a matching event/status about this problem + // when we reconcile the cluster and can't find the secret. + // That way, users will get two alerts: one when the secret is deleted + // and another when the cluster is being reconciled. 
+ DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + } +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={list} + +// findCrunchyBridgeClustersForSecret returns CrunchyBridgeClusters +// that are connected to the Secret +func (r *CrunchyBridgeClusterReconciler) findCrunchyBridgeClustersForSecret( + ctx context.Context, secret client.ObjectKey, +) []*v1beta1.CrunchyBridgeCluster { + var matching []*v1beta1.CrunchyBridgeCluster + var clusters v1beta1.CrunchyBridgeClusterList + + // NOTE: If this becomes slow due to a large number of CrunchyBridgeClusters in a single + // namespace, we can configure the [ctrl.Manager] field indexer and pass a + // [fields.Selector] here. + // - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html + if err := r.List(ctx, &clusters, &client.ListOptions{ + Namespace: secret.Namespace, + }); err == nil { + for i := range clusters.Items { + if clusters.Items[i].Spec.Secret == secret.Name { + matching = append(matching, &clusters.Items[i]) + } + } + } + return matching +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={list} + +// Watch enqueues all existing CrunchyBridgeClusters for reconciles. +func (r *CrunchyBridgeClusterReconciler) Watch() handler.EventHandler { + return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []reconcile.Request { + log := ctrl.LoggerFrom(ctx) + + crunchyBridgeClusterList := &v1beta1.CrunchyBridgeClusterList{} + if err := r.List(ctx, crunchyBridgeClusterList); err != nil { + log.Error(err, "Error listing CrunchyBridgeClusters.") + } + + reconcileRequests := []reconcile.Request{} + for index := range crunchyBridgeClusterList.Items { + reconcileRequests = append(reconcileRequests, + reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject( + &crunchyBridgeClusterList.Items[index], + ), + }, + ) + } + + return reconcileRequests + }) +} diff --git a/internal/bridge/crunchybridgecluster/watches_test.go b/internal/bridge/crunchybridgecluster/watches_test.go new file mode 100644 index 0000000000..48dba2ba14 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/watches_test.go @@ -0,0 +1,84 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/testing/require" +) + +func TestFindCrunchyBridgeClustersForSecret(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient) + reconciler := &CrunchyBridgeClusterReconciler{Client: tClient} + + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "crunchy-bridge-api-key" + + assert.NilError(t, tClient.Create(ctx, secret)) + secretObjectKey := client.ObjectKeyFromObject(secret) + + t.Run("NoClusters", func(t *testing.T) { + clusters := reconciler.findCrunchyBridgeClustersForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(clusters), 0) + }) + + t.Run("OneCluster", func(t *testing.T) { + cluster1 := testCluster() + cluster1.Namespace = ns.Name + cluster1.Name = "first-cluster" + assert.NilError(t, tClient.Create(ctx, cluster1)) + + clusters := reconciler.findCrunchyBridgeClustersForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(clusters), 1) + assert.Equal(t, clusters[0].Name, "first-cluster") + }) + + t.Run("TwoClusters", func(t *testing.T) { + cluster2 := testCluster() + cluster2.Namespace = ns.Name + cluster2.Name = "second-cluster" + assert.NilError(t, tClient.Create(ctx, cluster2)) + clusters := reconciler.findCrunchyBridgeClustersForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(clusters), 2) + clusterCount := map[string]int{} + for _, cluster := range clusters { + clusterCount[cluster.Name] += 1 + } + assert.Equal(t, clusterCount["first-cluster"], 1) + assert.Equal(t, clusterCount["second-cluster"], 1) + }) + + t.Run("ClusterWithDifferentSecretNameNotIncluded", func(t *testing.T) { + cluster3 := testCluster() + cluster3.Namespace = ns.Name + cluster3.Name = "third-cluster" + cluster3.Spec.Secret = "different-secret-name" + assert.NilError(t, tClient.Create(ctx, cluster3)) + clusters := reconciler.findCrunchyBridgeClustersForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(clusters), 2) + clusterCount := map[string]int{} + for _, cluster := range clusters { + clusterCount[cluster.Name] += 1 + } + assert.Equal(t, clusterCount["first-cluster"], 1) + assert.Equal(t, clusterCount["second-cluster"], 1) + assert.Equal(t, clusterCount["third-cluster"], 0) + }) +} diff --git a/internal/bridge/installation.go b/internal/bridge/installation.go index 33ca9e6ca5..c76a073348 100644 --- a/internal/bridge/installation.go +++ b/internal/bridge/installation.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package bridge @@ -61,7 +50,7 @@ type Installation struct { type InstallationReconciler struct { Owner client.FieldOwner Reader interface { - Get(context.Context, client.ObjectKey, client.Object) error + Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error } Writer interface { Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error @@ -102,11 +91,14 @@ func ManagedInstallationReconciler(m manager.Manager, newClient func() *Client) )). // // Wake periodically even when that Secret does not exist. - Watches( - runtime.NewTickerImmediate(time.Hour, event.GenericEvent{}), - handler.EnqueueRequestsFromMapFunc(func(client.Object) []reconcile.Request { - return []reconcile.Request{{NamespacedName: reconciler.SecretRef}} - }), + WatchesRawSource( + runtime.NewTickerImmediate(time.Hour, event.GenericEvent{}, + handler.EnqueueRequestsFromMapFunc( + func(context.Context, client.Object) []reconcile.Request { + return []reconcile.Request{{NamespacedName: reconciler.SecretRef}} + }, + ), + ), ). // Complete(reconciler) @@ -128,13 +120,15 @@ func (r *InstallationReconciler) Reconcile( result.RequeueAfter, err = r.reconcile(ctx, secret) } - // TODO: Check for corev1.NamespaceTerminatingCause after - // k8s.io/apimachinery@v0.25; see https://issue.k8s.io/108528. + // Nothing can be written to a deleted namespace. + if err != nil && apierrors.HasStatusCause(err, corev1.NamespaceTerminatingCause) { + return runtime.ErrorWithoutBackoff(err) + } // Write conflicts are returned as errors; log and retry with backoff. if err != nil && apierrors.IsConflict(err) { logging.FromContext(ctx).Info("Requeue", "reason", err) - err, result.Requeue, result.RequeueAfter = nil, true, 0 + return runtime.RequeueWithBackoff(), nil } return result, err diff --git a/internal/bridge/installation_test.go b/internal/bridge/installation_test.go index 100a1842f9..96223a2233 100644 --- a/internal/bridge/installation_test.go +++ b/internal/bridge/installation_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package bridge diff --git a/internal/bridge/naming.go b/internal/bridge/naming.go index b42bc4331f..cabe8e9cf6 100644 --- a/internal/bridge/naming.go +++ b/internal/bridge/naming.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package bridge diff --git a/internal/bridge/quantity.go b/internal/bridge/quantity.go new file mode 100644 index 0000000000..a948c6b4cf --- /dev/null +++ b/internal/bridge/quantity.go @@ -0,0 +1,44 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/resource" +) + +func FromCPU(n int64) *resource.Quantity { + // Assume the Bridge API returns numbers that can be parsed by the + // [resource] package. + if q, err := resource.ParseQuantity(fmt.Sprint(n)); err == nil { + return &q + } + + return resource.NewQuantity(0, resource.DecimalSI) +} + +// FromGibibytes returns n gibibytes as a [resource.Quantity]. +func FromGibibytes(n int64) *resource.Quantity { + // Assume the Bridge API returns numbers that can be parsed by the + // [resource] package. + if q, err := resource.ParseQuantity(fmt.Sprint(n) + "Gi"); err == nil { + return &q + } + + return resource.NewQuantity(0, resource.BinarySI) +} + +// ToGibibytes returns q rounded up to a non-negative gibibyte. +func ToGibibytes(q resource.Quantity) int64 { + v := q.Value() + + if v <= 0 { + return 0 + } + + // https://stackoverflow.com/a/2745086 + return 1 + ((v - 1) >> 30) +} diff --git a/internal/bridge/quantity_test.go b/internal/bridge/quantity_test.go new file mode 100644 index 0000000000..7cfebb4a86 --- /dev/null +++ b/internal/bridge/quantity_test.go @@ -0,0 +1,59 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "testing" + + "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestFromCPU(t *testing.T) { + zero := FromCPU(0) + assert.Assert(t, zero.IsZero()) + assert.Equal(t, zero.String(), "0") + + one := FromCPU(1) + assert.Equal(t, one.String(), "1") + + negative := FromCPU(-2) + assert.Equal(t, negative.String(), "-2") +} + +func TestFromGibibytes(t *testing.T) { + zero := FromGibibytes(0) + assert.Assert(t, zero.IsZero()) + assert.Equal(t, zero.String(), "0") + + one := FromGibibytes(1) + assert.Equal(t, one.String(), "1Gi") + + negative := FromGibibytes(-2) + assert.Equal(t, negative.String(), "-2Gi") +} + +func TestToGibibytes(t *testing.T) { + zero := resource.MustParse("0") + assert.Equal(t, ToGibibytes(zero), int64(0)) + + // Negative quantities become zero. + negative := resource.MustParse("-4G") + assert.Equal(t, ToGibibytes(negative), int64(0)) + + // Decimal quantities round up. + decimal := resource.MustParse("9000M") + assert.Equal(t, ToGibibytes(decimal), int64(9)) + + // Binary quantities round up. + binary := resource.MustParse("8000Mi") + assert.Equal(t, ToGibibytes(binary), int64(8)) + + fourGi := resource.MustParse("4096Mi") + assert.Equal(t, ToGibibytes(fourGi), int64(4)) + + moreThanFourGi := resource.MustParse("4097Mi") + assert.Equal(t, ToGibibytes(moreThanFourGi), int64(5)) +} diff --git a/internal/config/config.go b/internal/config/config.go index a5a874eee5..e3f9ced215 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
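// ToGibibytes above is ceiling division by 2^30 done in integer math: for
// v > 0, 1 + ((v - 1) >> 30) equals ceil(v / 2^30). Worked against the tests:
//
//	4096Mi = 4 * 2^30        -> 1 + ((4*2^30 - 1) >> 30)        = 1 + 3 = 4
//	4097Mi = 4*2^30 + 2^20   -> 1 + ((4*2^30 + 2^20 - 1) >> 30) = 1 + 4 = 5
//	8000Mi ≈ 7.81 * 2^30     -> rounds up to 8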
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package config @@ -30,22 +19,30 @@ func defaultFromEnv(value, key string) string { return value } -func RegistrationRequired() bool { - return os.Getenv("REGISTRATION_REQUIRED") == "true" -} - -// Get the version of CPK that applied the first RegistrationRequired status to this cluster. -func RegistrationRequiredBy(cluster *v1beta1.PostgresCluster) string { - if cluster.Status.RegistrationRequired == nil { - return "" +// FetchKeyCommand returns the fetch_key_cmd value stored in the encryption_key_command +// variable used to enable TDE. +func FetchKeyCommand(spec *v1beta1.PostgresClusterSpec) string { + if spec.Patroni != nil { + if spec.Patroni.DynamicConfiguration != nil { + configuration := spec.Patroni.DynamicConfiguration + if configuration != nil { + if postgresql, ok := configuration["postgresql"].(map[string]any); ok { + if parameters, ok := postgresql["parameters"].(map[string]any); ok { + if parameters["encryption_key_command"] != nil { + return fmt.Sprintf("%s", parameters["encryption_key_command"]) + } + } + } + } + } } - return cluster.Status.RegistrationRequired.PGOVersion + return "" } // Red Hat Marketplace requires operators to use environment variables be used // for any image other than the operator itself. Those variables must start with // "RELATED_IMAGE_" so that OSBS can transform their tag values into digests -// for a "disconncted" OLM CSV. +// for a "disconnected" OLM CSV. // - https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/offline-enabled-operators // - https://osbs.readthedocs.io/en/latest/users.html#pullspec-locations diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 5444a509bd..7b8ca2f863 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
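// FetchKeyCommand above digs through Patroni's dynamic configuration along
// the path postgresql.parameters.encryption_key_command. In manifest terms,
// it reads the value from a spec shaped like this (illustrative values):
//
//	patroni:
//	  dynamicConfiguration:
//	    postgresql:
//	      parameters:
//	        encryption_key_command: echo mykey
//
// Any missing level, or an empty value, yields "", which callers treat as
// "TDE is not configured".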
+// +// SPDX-License-Identifier: Apache-2.0 package config @@ -25,40 +14,82 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -func saveEnv(t testing.TB, key string) { - t.Helper() - previous, ok := os.LookupEnv(key) - t.Cleanup(func() { - if ok { - os.Setenv(key, previous) - } else { - os.Unsetenv(key) - } - }) -} +func TestFetchKeyCommand(t *testing.T) { -func setEnv(t testing.TB, key, value string) { - t.Helper() - saveEnv(t, key) - assert.NilError(t, os.Setenv(key, value)) -} + spec1 := v1beta1.PostgresClusterSpec{} + assert.Assert(t, FetchKeyCommand(&spec1) == "") + + spec2 := v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{}, + } + assert.Assert(t, FetchKeyCommand(&spec2) == "") + + spec3 := v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{}, + }, + } + assert.Assert(t, FetchKeyCommand(&spec3) == "") + + spec4 := v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{}, + }, + }, + } + assert.Assert(t, FetchKeyCommand(&spec4) == "") + + spec5 := v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{}, + }, + }, + }, + } + assert.Assert(t, FetchKeyCommand(&spec5) == "") + + spec6 := v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "encryption_key_command": "", + }, + }, + }, + }, + } + assert.Assert(t, FetchKeyCommand(&spec6) == "") + + spec7 := v1beta1.PostgresClusterSpec{ + Patroni: &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "encryption_key_command": "echo mykey", + }, + }, + }, + }, + } + assert.Assert(t, FetchKeyCommand(&spec7) == "echo mykey") -func unsetEnv(t testing.TB, key string) { - t.Helper() - saveEnv(t, key) - assert.NilError(t, os.Unsetenv(key)) } func TestPGAdminContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} - unsetEnv(t, "RELATED_IMAGE_PGADMIN") + t.Setenv("RELATED_IMAGE_PGADMIN", "") + os.Unsetenv("RELATED_IMAGE_PGADMIN") assert.Equal(t, PGAdminContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGADMIN", "") + t.Setenv("RELATED_IMAGE_PGADMIN", "") assert.Equal(t, PGAdminContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGADMIN", "env-var-pgadmin") + t.Setenv("RELATED_IMAGE_PGADMIN", "env-var-pgadmin") assert.Equal(t, PGAdminContainerImage(cluster), "env-var-pgadmin") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -70,13 +101,14 @@ func TestPGAdminContainerImage(t *testing.T) { func TestPGBackRestContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} - unsetEnv(t, "RELATED_IMAGE_PGBACKREST") + t.Setenv("RELATED_IMAGE_PGBACKREST", "") + os.Unsetenv("RELATED_IMAGE_PGBACKREST") assert.Equal(t, PGBackRestContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGBACKREST", "") + t.Setenv("RELATED_IMAGE_PGBACKREST", "") assert.Equal(t, PGBackRestContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGBACKREST", "env-var-pgbackrest") + t.Setenv("RELATED_IMAGE_PGBACKREST", "env-var-pgbackrest") assert.Equal(t, PGBackRestContainerImage(cluster), "env-var-pgbackrest") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -88,13 +120,14 @@ func TestPGBackRestContainerImage(t *testing.T) { func TestPGBouncerContainerImage(t 
*testing.T) { cluster := &v1beta1.PostgresCluster{} - unsetEnv(t, "RELATED_IMAGE_PGBOUNCER") + t.Setenv("RELATED_IMAGE_PGBOUNCER", "") + os.Unsetenv("RELATED_IMAGE_PGBOUNCER") assert.Equal(t, PGBouncerContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGBOUNCER", "") + t.Setenv("RELATED_IMAGE_PGBOUNCER", "") assert.Equal(t, PGBouncerContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGBOUNCER", "env-var-pgbouncer") + t.Setenv("RELATED_IMAGE_PGBOUNCER", "env-var-pgbouncer") assert.Equal(t, PGBouncerContainerImage(cluster), "env-var-pgbouncer") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -106,13 +139,14 @@ func TestPGBouncerContainerImage(t *testing.T) { func TestPGExporterContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} - unsetEnv(t, "RELATED_IMAGE_PGEXPORTER") + t.Setenv("RELATED_IMAGE_PGEXPORTER", "") + os.Unsetenv("RELATED_IMAGE_PGEXPORTER") assert.Equal(t, PGExporterContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGEXPORTER", "") + t.Setenv("RELATED_IMAGE_PGEXPORTER", "") assert.Equal(t, PGExporterContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGEXPORTER", "env-var-pgexporter") + t.Setenv("RELATED_IMAGE_PGEXPORTER", "env-var-pgexporter") assert.Equal(t, PGExporterContainerImage(cluster), "env-var-pgexporter") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -124,13 +158,14 @@ func TestPGExporterContainerImage(t *testing.T) { func TestStandalonePGAdminContainerImage(t *testing.T) { pgadmin := &v1beta1.PGAdmin{} - unsetEnv(t, "RELATED_IMAGE_STANDALONE_PGADMIN") + t.Setenv("RELATED_IMAGE_STANDALONE_PGADMIN", "") + os.Unsetenv("RELATED_IMAGE_STANDALONE_PGADMIN") assert.Equal(t, StandalonePGAdminContainerImage(pgadmin), "") - setEnv(t, "RELATED_IMAGE_STANDALONE_PGADMIN", "") + t.Setenv("RELATED_IMAGE_STANDALONE_PGADMIN", "") assert.Equal(t, StandalonePGAdminContainerImage(pgadmin), "") - setEnv(t, "RELATED_IMAGE_STANDALONE_PGADMIN", "env-var-pgadmin") + t.Setenv("RELATED_IMAGE_STANDALONE_PGADMIN", "env-var-pgadmin") assert.Equal(t, StandalonePGAdminContainerImage(pgadmin), "env-var-pgadmin") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -143,13 +178,14 @@ func TestPostgresContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} cluster.Spec.PostgresVersion = 12 - unsetEnv(t, "RELATED_IMAGE_POSTGRES_12") + t.Setenv("RELATED_IMAGE_POSTGRES_12", "") + os.Unsetenv("RELATED_IMAGE_POSTGRES_12") assert.Equal(t, PostgresContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_POSTGRES_12", "") + t.Setenv("RELATED_IMAGE_POSTGRES_12", "") assert.Equal(t, PostgresContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_POSTGRES_12", "env-var-postgres") + t.Setenv("RELATED_IMAGE_POSTGRES_12", "env-var-postgres") assert.Equal(t, PostgresContainerImage(cluster), "env-var-postgres") cluster.Spec.Image = "spec-image" @@ -157,7 +193,7 @@ func TestPostgresContainerImage(t *testing.T) { cluster.Spec.Image = "" cluster.Spec.PostGISVersion = "3.0" - setEnv(t, "RELATED_IMAGE_POSTGRES_12_GIS_3.0", "env-var-postgis") + t.Setenv("RELATED_IMAGE_POSTGRES_12_GIS_3.0", "env-var-postgis") assert.Equal(t, PostgresContainerImage(cluster), "env-var-postgis") cluster.Spec.Image = "spec-image" @@ -168,7 +204,9 @@ func TestVerifyImageValues(t *testing.T) { cluster := &v1beta1.PostgresCluster{} verifyImageCheck := func(t *testing.T, envVar, errString string, cluster *v1beta1.PostgresCluster) { - unsetEnv(t, envVar) + + t.Setenv(envVar, "") + os.Unsetenv(envVar) err := VerifyImageValues(cluster) assert.ErrorContains(t, err, errString) } diff --git 
a/internal/controller/pgupgrade/apply.go b/internal/controller/pgupgrade/apply.go index dc2513e48c..71cf65cd4f 100644 --- a/internal/controller/pgupgrade/apply.go +++ b/internal/controller/pgupgrade/apply.go @@ -1,16 +1,6 @@ -// Copyright 2021 - 2023 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade diff --git a/internal/controller/pgupgrade/jobs.go b/internal/controller/pgupgrade/jobs.go index 7bfbf7e7ca..a1722dfc12 100644 --- a/internal/controller/pgupgrade/jobs.go +++ b/internal/controller/pgupgrade/jobs.go @@ -1,16 +1,6 @@ -// Copyright 2021 - 2023 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade @@ -42,10 +32,16 @@ func pgUpgradeJob(upgrade *v1beta1.PGUpgrade) metav1.ObjectMeta { // upgradeCommand returns an entrypoint that prepares the filesystem for // and performs a PostgreSQL major version upgrade using pg_upgrade. -func upgradeCommand(upgrade *v1beta1.PGUpgrade) []string { +func upgradeCommand(upgrade *v1beta1.PGUpgrade, fetchKeyCommand string) []string { oldVersion := fmt.Sprint(upgrade.Spec.FromPostgresVersion) newVersion := fmt.Sprint(upgrade.Spec.ToPostgresVersion) + // if the fetch key command is set for TDE, provide the value during initialization + initdb := `/usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}"` + if fetchKeyCommand != "" { + initdb += ` --encryption-key-command "` + fetchKeyCommand + `"` + } + args := []string{oldVersion, newVersion} script := strings.Join([]string{ `declare -r data_volume='/pgdata' old_version="$1" new_version="$2"`, @@ -63,7 +59,7 @@ func upgradeCommand(upgrade *v1beta1.PGUpgrade) []string { `echo "postgres:x:${gid%% *}:") > "${NSS_WRAPPER_GROUP}"`, // Create a copy of the system user definitions, but remove the "postgres" - // user or any user with the currrent UID. Replace them with our own that + // user or any user with the current UID. Replace them with our own that // has the current UID and GID. 
`uid=$(id -u); NSS_WRAPPER_PASSWD=$(mktemp)`, `(sed "/^postgres:x:/ d; /^[^:]*:x:${uid}:/ d" /etc/passwd`, @@ -74,7 +70,7 @@ func upgradeCommand(upgrade *v1beta1.PGUpgrade) []string { `export LD_PRELOAD='libnss_wrapper.so' NSS_WRAPPER_GROUP NSS_WRAPPER_PASSWD`, // Below is the pg_upgrade script used to upgrade a PostgresCluster from - // one major verson to another. Additional information concerning the + // one major version to another. Additional information concerning the // steps used and command flag specifics can be found in the documentation: // - https://www.postgresql.org/docs/current/pgupgrade.html @@ -84,7 +80,7 @@ func upgradeCommand(upgrade *v1beta1.PGUpgrade) []string { `echo -e "Step 1: Making new pgdata directory...\n"`, `mkdir /pgdata/pg"${new_version}"`, `echo -e "Step 2: Initializing new pgdata directory...\n"`, - `/usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}"`, + initdb, // Before running the upgrade check, which ensures the clusters are compatible, // proper permissions have to be set on the old pgdata directory and the @@ -124,7 +120,8 @@ func upgradeCommand(upgrade *v1beta1.PGUpgrade) []string { // generateUpgradeJob returns a Job that can upgrade the PostgreSQL data // directory of the startup instance. func (r *PGUpgradeReconciler) generateUpgradeJob( - _ context.Context, upgrade *v1beta1.PGUpgrade, startup *appsv1.StatefulSet, + _ context.Context, upgrade *v1beta1.PGUpgrade, + startup *appsv1.StatefulSet, fetchKeyCommand string, ) *batchv1.Job { job := &batchv1.Job{} job.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) @@ -177,7 +174,7 @@ func (r *PGUpgradeReconciler) generateUpgradeJob( VolumeMounts: database.VolumeMounts, // Use our upgrade command and the specified image and resources. - Command: upgradeCommand(upgrade), + Command: upgradeCommand(upgrade, fetchKeyCommand), Image: pgUpgradeContainerImage(upgrade), ImagePullPolicy: upgrade.Spec.ImagePullPolicy, Resources: upgrade.Spec.Resources, @@ -185,8 +182,8 @@ func (r *PGUpgradeReconciler) generateUpgradeJob( // The following will set these fields to null if not set in the spec job.Spec.Template.Spec.Affinity = upgrade.Spec.Affinity - job.Spec.Template.Spec.PriorityClassName = initialize.FromPointer( - upgrade.Spec.PriorityClassName) + job.Spec.Template.Spec.PriorityClassName = + initialize.FromPointer(upgrade.Spec.PriorityClassName) job.Spec.Template.Spec.Tolerations = upgrade.Spec.Tolerations r.setControllerReference(upgrade, job) @@ -295,8 +292,8 @@ func (r *PGUpgradeReconciler) generateRemoveDataJob( // The following will set these fields to null if not set in the spec job.Spec.Template.Spec.Affinity = upgrade.Spec.Affinity - job.Spec.Template.Spec.PriorityClassName = initialize.FromPointer( - upgrade.Spec.PriorityClassName) + job.Spec.Template.Spec.PriorityClassName = + initialize.FromPointer(upgrade.Spec.PriorityClassName) job.Spec.Template.Spec.Tolerations = upgrade.Spec.Tolerations r.setControllerReference(upgrade, job) diff --git a/internal/controller/pgupgrade/jobs_test.go b/internal/controller/pgupgrade/jobs_test.go index ee5664ed5f..8dfc4731a2 100644 --- a/internal/controller/pgupgrade/jobs_test.go +++ b/internal/controller/pgupgrade/jobs_test.go @@ -1,16 +1,6 @@ -// Copyright 2021 - 2023 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
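// With the wiring above, a non-empty fetch key command changes only the
// initdb step of the upgrade script. For fetchKeyCommand = "echo testKey"
// (the value used in the tests below), the step expands to:
//
//	/usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}" --encryption-key-command "echo testKey"
//
// where -k enables data checksums and --encryption-key-command tells the
// TDE-enabled initdb how to obtain the encryption key.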
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade @@ -21,25 +11,16 @@ import ( "testing" "gotest.tools/v3/assert" - "gotest.tools/v3/assert/cmp" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// marshalMatches converts actual to YAML and compares that to expected. -func marshalMatches(actual interface{}, expected string) cmp.Comparison { - b, err := yaml.Marshal(actual) - if err != nil { - return func() cmp.Result { return cmp.ResultFromError(err) } - } - return cmp.DeepEqual(string(b), strings.Trim(expected, "\t\n")+"\n") -} - func TestGenerateUpgradeJob(t *testing.T) { ctx := context.Background() reconciler := &PGUpgradeReconciler{} @@ -76,8 +57,8 @@ func TestGenerateUpgradeJob(t *testing.T) { }, } - job := reconciler.generateUpgradeJob(ctx, upgrade, startup) - assert.Assert(t, marshalMatches(job, ` + job := reconciler.generateUpgradeJob(ctx, upgrade, startup, "") + assert.Assert(t, cmp.MarshalMatches(job, ` apiVersion: batch/v1 kind: Job metadata: @@ -163,6 +144,11 @@ spec: name: vol2 status: {} `)) + + tdeJob := reconciler.generateUpgradeJob(ctx, upgrade, startup, "echo testKey") + b, _ := yaml.Marshal(tdeJob) + assert.Assert(t, strings.Contains(string(b), + `/usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}" --encryption-key-command "echo testKey"`)) } func TestGenerateRemoveDataJob(t *testing.T) { @@ -203,7 +189,7 @@ func TestGenerateRemoveDataJob(t *testing.T) { } job := reconciler.generateRemoveDataJob(ctx, upgrade, sts) - assert.Assert(t, marshalMatches(job, ` + assert.Assert(t, cmp.MarshalMatches(job, ` apiVersion: batch/v1 kind: Job metadata: @@ -266,42 +252,17 @@ status: {} `)) } -// saveEnv preserves environment variables so that any modifications needed for -// the tests can be undone once completed. 
-func saveEnv(t testing.TB, key string) { - t.Helper() - previous, ok := os.LookupEnv(key) - t.Cleanup(func() { - if ok { - os.Setenv(key, previous) - } else { - os.Unsetenv(key) - } - }) -} - -func setEnv(t testing.TB, key, value string) { - t.Helper() - saveEnv(t, key) - assert.NilError(t, os.Setenv(key, value)) -} - -func unsetEnv(t testing.TB, key string) { - t.Helper() - saveEnv(t, key) - assert.NilError(t, os.Unsetenv(key)) -} - func TestPGUpgradeContainerImage(t *testing.T) { upgrade := &v1beta1.PGUpgrade{} - unsetEnv(t, "RELATED_IMAGE_PGUPGRADE") + t.Setenv("RELATED_IMAGE_PGUPGRADE", "") + os.Unsetenv("RELATED_IMAGE_PGUPGRADE") assert.Equal(t, pgUpgradeContainerImage(upgrade), "") - setEnv(t, "RELATED_IMAGE_PGUPGRADE", "") + t.Setenv("RELATED_IMAGE_PGUPGRADE", "") assert.Equal(t, pgUpgradeContainerImage(upgrade), "") - setEnv(t, "RELATED_IMAGE_PGUPGRADE", "env-var-pgbackrest") + t.Setenv("RELATED_IMAGE_PGUPGRADE", "env-var-pgbackrest") assert.Equal(t, pgUpgradeContainerImage(upgrade), "env-var-pgbackrest") assert.NilError(t, yaml.Unmarshal( @@ -313,7 +274,8 @@ func TestVerifyUpgradeImageValue(t *testing.T) { upgrade := &v1beta1.PGUpgrade{} t.Run("crunchy-postgres", func(t *testing.T) { - unsetEnv(t, "RELATED_IMAGE_PGUPGRADE") + t.Setenv("RELATED_IMAGE_PGUPGRADE", "") + os.Unsetenv("RELATED_IMAGE_PGUPGRADE") err := verifyUpgradeImageValue(upgrade) assert.ErrorContains(t, err, "crunchy-upgrade") }) diff --git a/internal/controller/pgupgrade/labels.go b/internal/controller/pgupgrade/labels.go index f9982307a1..187fe6bf6f 100644 --- a/internal/controller/pgupgrade/labels.go +++ b/internal/controller/pgupgrade/labels.go @@ -1,16 +1,6 @@ -// Copyright 2021 - 2023 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade diff --git a/internal/controller/pgupgrade/pgupgrade_controller.go b/internal/controller/pgupgrade/pgupgrade_controller.go index b4fb506645..d6d145b793 100644 --- a/internal/controller/pgupgrade/pgupgrade_controller.go +++ b/internal/controller/pgupgrade/pgupgrade_controller.go @@ -1,16 +1,6 @@ -// Copyright 2021 - 2023 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
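// The saveEnv/setEnv/unsetEnv helpers removed above predate testing.T.Setenv
// (Go 1.17), which already snapshots the previous value and registers a
// t.Cleanup to restore it. The replacement idiom keeps that restoration even
// for the "unset" case, e.g. (hypothetical test name, mirroring the
// TestPGUpgradeContainerImage assertions above):

func TestExampleUnsetRelatedImage(t *testing.T) {
	t.Setenv("RELATED_IMAGE_PGUPGRADE", "") // snapshot old value; restored by t.Cleanup
	os.Unsetenv("RELATED_IMAGE_PGUPGRADE")  // then simulate the variable being absent

	assert.Equal(t, pgUpgradeContainerImage(&v1beta1.PGUpgrade{}), "")
}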
+// SPDX-License-Identifier: Apache-2.0 package pgupgrade @@ -23,14 +13,16 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/source" + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -40,14 +32,11 @@ const ( // PGUpgradeReconciler reconciles a PGUpgrade object type PGUpgradeReconciler struct { - client.Client + Client client.Client Owner client.FieldOwner - Scheme *runtime.Scheme - // For this iteration, we will only be setting conditions rather than - // setting conditions and emitting events. That may change in the future, - // so we're leaving this EventRecorder here for now. - // record.EventRecorder + Recorder record.EventRecorder + Registration registration.Registration } //+kubebuilder:rbac:groups="batch",resources="jobs",verbs={list,watch} @@ -60,7 +49,7 @@ func (r *PGUpgradeReconciler) SetupWithManager(mgr ctrl.Manager) error { For(&v1beta1.PGUpgrade{}). Owns(&batchv1.Job{}). Watches( - &source.Kind{Type: v1beta1.NewPostgresCluster()}, + v1beta1.NewPostgresCluster(), r.watchPostgresClusters(), ). Complete(r) @@ -79,7 +68,7 @@ func (r *PGUpgradeReconciler) findUpgradesForPostgresCluster( // namespace, we can configure the [ctrl.Manager] field indexer and pass a // [fields.Selector] here. // - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html - if r.List(ctx, &upgrades, &client.ListOptions{ + if r.Client.List(ctx, &upgrades, &client.ListOptions{ Namespace: cluster.Namespace, }) == nil { for i := range upgrades.Items { @@ -93,8 +82,7 @@ func (r *PGUpgradeReconciler) findUpgradesForPostgresCluster( // watchPostgresClusters returns a [handler.EventHandler] for PostgresClusters. 
func (r *PGUpgradeReconciler) watchPostgresClusters() handler.Funcs { - handle := func(cluster client.Object, q workqueue.RateLimitingInterface) { - ctx := context.Background() + handle := func(ctx context.Context, cluster client.Object, q workqueue.RateLimitingInterface) { key := client.ObjectKeyFromObject(cluster) for _, upgrade := range r.findUpgradesForPostgresCluster(ctx, key) { @@ -105,14 +93,14 @@ func (r *PGUpgradeReconciler) watchPostgresClusters() handler.Funcs { } return handler.Funcs{ - CreateFunc: func(e event.CreateEvent, q workqueue.RateLimitingInterface) { - handle(e.Object, q) + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) }, - UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) { - handle(e.ObjectNew, q) + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.ObjectNew, q) }, - DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) { - handle(e.Object, q) + DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) }, } } @@ -139,14 +127,14 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // copy before returning from its cache. // - https://github.com/kubernetes-sigs/controller-runtime/issues/1235 upgrade := &v1beta1.PGUpgrade{} - err = r.Get(ctx, req.NamespacedName, upgrade) + err = r.Client.Get(ctx, req.NamespacedName, upgrade) if err == nil { // Write any changes to the upgrade status on the way out. before := upgrade.DeepCopy() defer func() { if !equality.Semantic.DeepEqual(before.Status, upgrade.Status) { - status := r.Status().Patch(ctx, upgrade, client.MergeFrom(before), r.Owner) + status := r.Client.Status().Patch(ctx, upgrade, client.MergeFrom(before), r.Owner) if err == nil && status != nil { err = status @@ -177,6 +165,10 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return } + if !r.UpgradeAuthorized(upgrade) { + return ctrl.Result{}, nil + } + // Set progressing condition to true if it doesn't exist already setStatusToProgressingIfReasonWas("", upgrade) @@ -451,7 +443,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // Set the pgBackRest status for bootstrapping patch.Status.PGBackRest.Repos = []v1beta1.RepoStatus{} - err = r.Status().Patch(ctx, patch, client.MergeFrom(world.Cluster), r.Owner) + err = r.Client.Status().Patch(ctx, patch, client.MergeFrom(world.Cluster), r.Owner) } return ctrl.Result{}, err @@ -460,7 +452,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // TODO: error from apply could mean that the job exists with a different spec. if err == nil && !upgradeJobComplete { err = errors.WithStack(r.apply(ctx, - r.generateUpgradeJob(ctx, upgrade, world.ClusterPrimary))) + r.generateUpgradeJob(ctx, upgrade, world.ClusterPrimary, config.FetchKeyCommand(&world.Cluster.Spec)))) } // Create the jobs to remove the data from the replicas, as long as @@ -492,7 +484,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } // Requeue to verify that Patroni endpoints are deleted - return ctrl.Result{Requeue: true}, err // FIXME + return runtime.RequeueWithBackoff(), err // FIXME } // TODO: write upgradeJob back to world? 
No, we will wake and see it when it @@ -500,9 +492,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // TODO: consider what it means to "re-use" the same PGUpgrade for more than // one postgres version. Should the job name include the version number? - log.Info("Reconciled", "requeue", err != nil || - result.Requeue || - result.RequeueAfter > 0) + log.Info("Reconciled", "requeue", !result.IsZero() || err != nil) return } diff --git a/internal/controller/pgupgrade/registration.go b/internal/controller/pgupgrade/registration.go new file mode 100644 index 0000000000..05d0d80cbd --- /dev/null +++ b/internal/controller/pgupgrade/registration.go @@ -0,0 +1,27 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package pgupgrade + +import ( + "k8s.io/apimachinery/pkg/api/meta" + + "github.com/crunchydata/postgres-operator/internal/registration" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func (r *PGUpgradeReconciler) UpgradeAuthorized(upgrade *v1beta1.PGUpgrade) bool { + // Allow an upgrade in progress to complete, when the registration requirement is introduced. + // But don't allow new upgrades to be started until a valid token is applied. + progressing := meta.FindStatusCondition(upgrade.Status.Conditions, ConditionPGUpgradeProgressing) != nil + required := r.Registration.Required(r.Recorder, upgrade, &upgrade.Status.Conditions) + + // If a valid token has not been applied, warn the user. + if required && !progressing { + registration.SetRequiredWarning(r.Recorder, upgrade, &upgrade.Status.Conditions) + return false + } + + return true +} diff --git a/internal/controller/pgupgrade/registration_test.go b/internal/controller/pgupgrade/registration_test.go new file mode 100644 index 0000000000..dc3a4144bc --- /dev/null +++ b/internal/controller/pgupgrade/registration_test.go @@ -0,0 +1,95 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
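// UpgradeAuthorized above gates new work on registration without interrupting
// an upgrade that is already underway. As a decision table:
//
//	progressing   registration required   proceed?
//	yes           yes                     yes (finish the in-flight upgrade)
//	yes           no                      yes
//	no            yes                     no  (warning event, Registered=False)
//	no            no                      yes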
+// +// SPDX-License-Identifier: Apache-2.0 + +package pgupgrade + +import ( + "testing" + + "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/registration" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/events" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestUpgradeAuthorized(t *testing.T) { + t.Run("UpgradeAlreadyInProgress", func(t *testing.T) { + reconciler := new(PGUpgradeReconciler) + upgrade := new(v1beta1.PGUpgrade) + + for _, required := range []bool{false, true} { + reconciler.Registration = registration.RegistrationFunc( + func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { + return required + }) + + meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ + Type: ConditionPGUpgradeProgressing, + Status: metav1.ConditionTrue, + }) + + result := reconciler.UpgradeAuthorized(upgrade) + assert.Assert(t, result, "expected signal to proceed") + + progressing := meta.FindStatusCondition(upgrade.Status.Conditions, ConditionPGUpgradeProgressing) + assert.Equal(t, progressing.Status, metav1.ConditionTrue) + } + }) + + t.Run("RegistrationRequired", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + upgrade := new(v1beta1.PGUpgrade) + upgrade.Name = "some-upgrade" + + reconciler := PGUpgradeReconciler{ + Recorder: recorder, + Registration: registration.RegistrationFunc( + func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { + return true + }), + } + + meta.RemoveStatusCondition(&upgrade.Status.Conditions, ConditionPGUpgradeProgressing) + + result := reconciler.UpgradeAuthorized(upgrade) + assert.Assert(t, !result, "expected signal to not proceed") + + condition := meta.FindStatusCondition(upgrade.Status.Conditions, v1beta1.Registered) + if assert.Check(t, condition != nil) { + assert.Equal(t, condition.Status, metav1.ConditionFalse) + } + + if assert.Check(t, len(recorder.Events) > 0) { + assert.Equal(t, recorder.Events[0].Type, "Warning") + assert.Equal(t, recorder.Events[0].Regarding.Kind, "PGUpgrade") + assert.Equal(t, recorder.Events[0].Regarding.Name, "some-upgrade") + assert.Assert(t, cmp.Contains(recorder.Events[0].Note, "requires")) + } + }) + + t.Run("RegistrationCompleted", func(t *testing.T) { + reconciler := new(PGUpgradeReconciler) + upgrade := new(v1beta1.PGUpgrade) + + called := false + reconciler.Registration = registration.RegistrationFunc( + func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { + called = true + return false + }) + + meta.RemoveStatusCondition(&upgrade.Status.Conditions, ConditionPGUpgradeProgressing) + + result := reconciler.UpgradeAuthorized(upgrade) + assert.Assert(t, result, "expected signal to proceed") + assert.Assert(t, called, "expected registration package to clear conditions") + }) +} diff --git a/internal/controller/pgupgrade/utils.go b/internal/controller/pgupgrade/utils.go index 794563192b..292107e440 100644 --- a/internal/controller/pgupgrade/utils.go +++ b/internal/controller/pgupgrade/utils.go @@ -1,16 +1,6 @@ -// Copyright 2021 - 2023 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade diff --git a/internal/controller/pgupgrade/world.go b/internal/controller/pgupgrade/world.go index f25bedc7cc..18d056fe25 100644 --- a/internal/controller/pgupgrade/world.go +++ b/internal/controller/pgupgrade/world.go @@ -1,16 +1,6 @@ -// Copyright 2021 - 2023 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade @@ -49,7 +39,7 @@ func (r *PGUpgradeReconciler) observeWorld( cluster := v1beta1.NewPostgresCluster() err := errors.WithStack( - r.Get(ctx, client.ObjectKey{ + r.Client.Get(ctx, client.ObjectKey{ Namespace: upgrade.Namespace, Name: upgrade.Spec.PostgresClusterName, }, cluster)) @@ -58,7 +48,7 @@ func (r *PGUpgradeReconciler) observeWorld( if err == nil { var endpoints corev1.EndpointsList err = errors.WithStack( - r.List(ctx, &endpoints, + r.Client.List(ctx, &endpoints, client.InNamespace(upgrade.Namespace), client.MatchingLabelsSelector{Selector: selectCluster}, )) @@ -68,7 +58,7 @@ func (r *PGUpgradeReconciler) observeWorld( if err == nil { var jobs batchv1.JobList err = errors.WithStack( - r.List(ctx, &jobs, + r.Client.List(ctx, &jobs, client.InNamespace(upgrade.Namespace), client.MatchingLabelsSelector{Selector: selectCluster}, )) @@ -80,7 +70,7 @@ func (r *PGUpgradeReconciler) observeWorld( if err == nil { var statefulsets appsv1.StatefulSetList err = errors.WithStack( - r.List(ctx, &statefulsets, + r.Client.List(ctx, &statefulsets, client.InNamespace(upgrade.Namespace), client.MatchingLabelsSelector{Selector: selectCluster}, )) diff --git a/internal/controller/pgupgrade/world_test.go b/internal/controller/pgupgrade/world_test.go index 1adb0e53a3..4aa24f714d 100644 --- a/internal/controller/pgupgrade/world_test.go +++ b/internal/controller/pgupgrade/world_test.go @@ -1,16 +1,6 @@ -// Copyright 2021 - 2023 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade diff --git a/internal/controller/postgrescluster/apply.go b/internal/controller/postgrescluster/apply.go index a619ce7e42..2dae1f7d80 100644 --- a/internal/controller/postgrescluster/apply.go +++ b/internal/controller/postgrescluster/apply.go @@ -1,33 +1,15 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" - "encoding/json" - "fmt" "reflect" - jsonpatch "github.com/evanphx/json-patch/v5" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/kubeapi" @@ -57,11 +39,6 @@ func (r *Reconciler) apply(ctx context.Context, object client.Object) error { // does not match the intent, send a json-patch to get really specific. switch actual := object.(type) { case *corev1.Service: - // Changing Service.Spec.Type requires a special apply-patch sometimes. - if err != nil { - err = r.handleServiceError(ctx, object.(*corev1.Service), data, err) - } - applyServiceSpec(patch, actual.Spec, intent.(*corev1.Service).Spec, "spec") } @@ -72,53 +49,6 @@ func (r *Reconciler) apply(ctx context.Context, object client.Object) error { return err } -// handleServiceError inspects err for expected Kubernetes API responses to -// writing a Service. It returns err when it cannot resolve the issue, otherwise -// it returns nil. -func (r *Reconciler) handleServiceError( - ctx context.Context, service *corev1.Service, apply []byte, err error, -) error { - var status metav1.Status - if api := apierrors.APIStatus(nil); errors.As(err, &api) { - status = api.Status() - } - - // Service.Spec.Ports.NodePort must be cleared for ClusterIP prior to - // Kubernetes 1.20. When all the errors are about disallowed "nodePort", - // run a json-patch on the apply-patch to set them all to null. 
- // - https://issue.k8s.io/33766 - if service.Spec.Type == corev1.ServiceTypeClusterIP { - add := json.RawMessage(`"add"`) - null := json.RawMessage(`null`) - patch := make(jsonpatch.Patch, 0, len(service.Spec.Ports)) - - if apierrors.IsInvalid(err) && status.Details != nil { - for i, cause := range status.Details.Causes { - path := json.RawMessage(fmt.Sprintf(`"/spec/ports/%d/nodePort"`, i)) - - if cause.Type == metav1.CauseType(field.ErrorTypeForbidden) && - cause.Field == fmt.Sprintf("spec.ports[%d].nodePort", i) { - patch = append(patch, - jsonpatch.Operation{"op": &add, "value": &null, "path": &path}) - } - } - } - - // Amend the apply-patch when all the errors can be fixed. - if len(patch) == len(service.Spec.Ports) { - apply, err = patch.Apply(apply) - } - - // Send the apply-patch with force=true. - if err == nil { - patch := client.RawPatch(client.Apply.Type(), apply) - err = r.patch(ctx, service, patch, client.ForceOwnership) - } - } - - return err -} - // applyServiceSpec is called by Reconciler.apply to work around issues // with server-side apply. func applyServiceSpec( diff --git a/internal/controller/postgrescluster/apply_test.go b/internal/controller/postgrescluster/apply_test.go index dda7e22fa1..c163e8a5ab 100644 --- a/internal/controller/postgrescluster/apply_test.go +++ b/internal/controller/postgrescluster/apply_test.go @@ -1,20 +1,6 @@ -//go:build envtest -// +build envtest - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -43,12 +29,12 @@ import ( func TestServerSideApply(t *testing.T) { ctx := context.Background() - env, cc := setupKubernetes(t) + cfg, cc := setupKubernetes(t) require.ParallelCapacity(t, 0) ns := setupNamespace(t, cc) - dc, err := discovery.NewDiscoveryClientForConfig(env.Config) + dc, err := discovery.NewDiscoveryClientForConfig(cfg) assert.NilError(t, err) server, err := dc.ServerVersion() @@ -81,10 +67,9 @@ func TestServerSideApply(t *testing.T) { assert.Assert(t, after.GetResourceVersion() != "") switch { - // TODO(tjmoore4): The update currently impacts 1.28+ only, but may be - // backpatched in the future. 
- // - https://github.com/kubernetes/kubernetes/pull/116865 - case serverVersion.LessThan(version.MustParseGeneric("1.28")): + case serverVersion.LessThan(version.MustParseGeneric("1.25.15")): + case serverVersion.AtLeast(version.MustParseGeneric("1.26")) && serverVersion.LessThan(version.MustParseGeneric("1.26.10")): + case serverVersion.AtLeast(version.MustParseGeneric("1.27")) && serverVersion.LessThan(version.MustParseGeneric("1.27.7")): assert.Assert(t, after.GetResourceVersion() != before.GetResourceVersion(), "expected https://issue.k8s.io/116861") @@ -314,55 +299,4 @@ func TestServerSideApply(t *testing.T) { }) } }) - - t.Run("ServiceType", func(t *testing.T) { - constructor := func(name string) *corev1.Service { - var service corev1.Service - service.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) - service.Namespace, service.Name = ns.Name, name - service.Spec.Ports = []corev1.ServicePort{ - {Name: "one", Port: 9999, Protocol: corev1.ProtocolTCP}, - {Name: "two", Port: 1234, Protocol: corev1.ProtocolTCP}, - } - return &service - } - - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} - - // Start as NodePort. - intent := constructor("node-port") - intent.Spec.Type = corev1.ServiceTypeNodePort - - // Create the Service. - before := intent.DeepCopy() - assert.NilError(t, - cc.Patch(ctx, before, client.Apply, client.ForceOwnership, reconciler.Owner)) - - // Change to ClusterIP. - intent.Spec.Type = corev1.ServiceTypeClusterIP - - // client.Apply cannot change it in old versions of Kubernetes. - after := intent.DeepCopy() - err := cc.Patch(ctx, after, client.Apply, client.ForceOwnership, reconciler.Owner) - - switch { - case serverVersion.LessThan(version.MustParseGeneric("1.20")): - - assert.ErrorContains(t, err, "nodePort: Forbidden", - "expected https://issue.k8s.io/33766") - - default: - assert.NilError(t, err) - assert.Equal(t, after.Spec.Type, intent.Spec.Type) - assert.Equal(t, after.Spec.ClusterIP, before.Spec.ClusterIP, - "expected to keep the same ClusterIP") - } - - // Our apply method changes it. - again := intent.DeepCopy() - assert.NilError(t, reconciler.apply(ctx, again)) - assert.Equal(t, again.Spec.Type, intent.Spec.Type) - assert.Equal(t, again.Spec.ClusterIP, before.Spec.ClusterIP, - "expected to keep the same ClusterIP") - }) } diff --git a/internal/controller/postgrescluster/cluster.go b/internal/controller/postgrescluster/cluster.go index aa935a42f4..3ba6eab0e8 100644 --- a/internal/controller/postgrescluster/cluster.go +++ b/internal/controller/postgrescluster/cluster.go @@ -1,22 +1,12 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" + "fmt" "io" "github.com/pkg/errors" @@ -25,6 +15,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/patroni" "github.com/crunchydata/postgres-operator/internal/pki" @@ -199,33 +190,66 @@ func (r *Reconciler) generateClusterReplicaService( service := &corev1.Service{ObjectMeta: naming.ClusterReplicaService(cluster)} service.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) - service.Annotations = naming.Merge( - cluster.Spec.Metadata.GetAnnotationsOrNil()) + service.Annotations = cluster.Spec.Metadata.GetAnnotationsOrNil() + service.Labels = cluster.Spec.Metadata.GetLabelsOrNil() + + if spec := cluster.Spec.ReplicaService; spec != nil { + service.Annotations = naming.Merge(service.Annotations, + spec.Metadata.GetAnnotationsOrNil()) + service.Labels = naming.Merge(service.Labels, + spec.Metadata.GetLabelsOrNil()) + } + + // add our labels last so they aren't overwritten service.Labels = naming.Merge( - cluster.Spec.Metadata.GetLabelsOrNil(), + service.Labels, map[string]string{ naming.LabelCluster: cluster.Name, naming.LabelRole: naming.RoleReplica, }) - // Allocate an IP address and let Kubernetes manage the Endpoints by - // selecting Pods with the Patroni replica role. - // - https://docs.k8s.io/concepts/services-networking/service/#defining-a-service - service.Spec.Type = corev1.ServiceTypeClusterIP - service.Spec.Selector = map[string]string{ - naming.LabelCluster: cluster.Name, - naming.LabelRole: naming.RolePatroniReplica, - } - // The TargetPort must be the name (not the number) of the PostgreSQL // ContainerPort. This name allows the port number to differ between Pods, // which can happen during a rolling update. - service.Spec.Ports = []corev1.ServicePort{{ + servicePort := corev1.ServicePort{ Name: naming.PortPostgreSQL, Port: *cluster.Spec.Port, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromString(naming.PortPostgreSQL), - }} + } + + // Default to a service type of ClusterIP + service.Spec.Type = corev1.ServiceTypeClusterIP + + // Check user provided spec for a specified type + if spec := cluster.Spec.ReplicaService; spec != nil { + service.Spec.Type = corev1.ServiceType(spec.Type) + if spec.NodePort != nil { + if service.Spec.Type == corev1.ServiceTypeClusterIP { + // The NodePort can only be set when the Service type is NodePort or + // LoadBalancer. However, due to a known issue prior to Kubernetes + // 1.20, we clear these errors during our apply. To preserve the + // appropriate behavior, we log an Event and return an error. 
+ // TODO(tjmoore4): Once Validation Rules are available, this check + // and event could potentially be removed in favor of that validation + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "MisconfiguredClusterIP", + "NodePort cannot be set with type ClusterIP on Service %q", service.Name) + return nil, fmt.Errorf("NodePort cannot be set with type ClusterIP on Service %q", service.Name) + } + servicePort.NodePort = *spec.NodePort + } + service.Spec.ExternalTrafficPolicy = initialize.FromPointer(spec.ExternalTrafficPolicy) + service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy + } + service.Spec.Ports = []corev1.ServicePort{servicePort} + + // Allocate an IP address and let Kubernetes manage the Endpoints by + // selecting Pods with the Patroni replica role. + // - https://docs.k8s.io/concepts/services-networking/service/#defining-a-service + service.Spec.Selector = map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RolePatroniReplica, + } err := errors.WithStack(r.setControllerReference(cluster, service)) @@ -258,7 +282,9 @@ func (r *Reconciler) reconcileClusterReplicaService( func (r *Reconciler) reconcileDataSource(ctx context.Context, cluster *v1beta1.PostgresCluster, observed *observedInstances, clusterVolumes []corev1.PersistentVolumeClaim, - rootCA *pki.RootCertificateAuthority) (bool, error) { + rootCA *pki.RootCertificateAuthority, + backupsSpecFound bool, +) (bool, error) { // a hash func to hash the pgBackRest restore options hashFunc := func(jobConfigs []string) (string, error) { @@ -381,7 +407,8 @@ func (r *Reconciler) reconcileDataSource(ctx context.Context, switch { case dataSource != nil: if err := r.reconcilePostgresClusterDataSource(ctx, cluster, dataSource, - configHash, clusterVolumes, rootCA); err != nil { + configHash, clusterVolumes, rootCA, + backupsSpecFound); err != nil { return true, err } case cloudDataSource != nil: diff --git a/internal/controller/postgrescluster/cluster_test.go b/internal/controller/postgrescluster/cluster_test.go index ff224977af..be9e371a56 100644 --- a/internal/controller/postgrescluster/cluster_test.go +++ b/internal/controller/postgrescluster/cluster_test.go @@ -1,23 +1,9 @@ -//go:build envtest -// +build envtest +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
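// The replica Service generation above now honors a user-provided
// spec.replicaService. An illustrative PostgresCluster fragment (the field
// casing assumes conventional JSON tags; the Go fields are Type and NodePort):
//
//	spec:
//	  replicaService:
//	    type: NodePort
//	    nodePort: 32001
//
// The type defaults to ClusterIP when replicaService is omitted, and setting
// nodePort while the type is ClusterIP is rejected up front: the reconciler
// emits a MisconfiguredClusterIP warning event and returns an error rather
// than letting the apply silently clear the port.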
-*/ - import ( "context" "testing" @@ -39,6 +25,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -614,11 +601,11 @@ func TestGenerateClusterPrimaryService(t *testing.T) { assert.ErrorContains(t, err, "not implemented") alwaysExpect := func(t testing.TB, service *corev1.Service, endpoints *corev1.Endpoints) { - assert.Assert(t, marshalMatches(service.TypeMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: pg5 @@ -633,7 +620,7 @@ ownerReferences: name: pg5 uid: "" `)) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: postgres port: 2600 protocol: TCP @@ -644,7 +631,7 @@ ownerReferences: assert.Assert(t, service.Spec.Selector == nil, "got %v", service.Spec.Selector) - assert.Assert(t, marshalMatches(endpoints, ` + assert.Assert(t, cmp.MarshalMatches(endpoints, ` apiVersion: v1 kind: Endpoints metadata: @@ -732,11 +719,12 @@ func TestGenerateClusterReplicaServiceIntent(t *testing.T) { service, err := reconciler.generateClusterReplicaService(cluster) assert.NilError(t, err) - assert.Assert(t, marshalMatches(service.TypeMeta, ` + alwaysExpect := func(t testing.TB, service *corev1.Service) { + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service - `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + `)) + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: pg2 @@ -750,8 +738,11 @@ ownerReferences: kind: PostgresCluster name: pg2 uid: "" - `)) - assert.Assert(t, marshalMatches(service.Spec, ` + `)) + } + + alwaysExpect(t, service) + assert.Assert(t, cmp.MarshalMatches(service.Spec, ` ports: - name: postgres port: 9876 @@ -763,6 +754,39 @@ selector: type: ClusterIP `)) + types := []struct { + Type string + Expect func(testing.TB, *corev1.Service) + }{ + {Type: "ClusterIP", Expect: func(t testing.TB, service *corev1.Service) { + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeClusterIP) + }}, + {Type: "NodePort", Expect: func(t testing.TB, service *corev1.Service) { + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeNodePort) + }}, + {Type: "LoadBalancer", Expect: func(t testing.TB, service *corev1.Service) { + assert.Equal(t, service.Spec.Type, corev1.ServiceTypeLoadBalancer) + }}, + } + + for _, test := range types { + t.Run(test.Type, func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.ReplicaService = &v1beta1.ServiceSpec{Type: test.Type} + + service, err := reconciler.generateClusterReplicaService(cluster) + assert.NilError(t, err) + alwaysExpect(t, service) + test.Expect(t, service) + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` +- name: postgres + port: 9876 + protocol: TCP + targetPort: postgres + `)) + }) + } + t.Run("AnnotationsLabels", func(t *testing.T) { cluster := cluster.DeepCopy() cluster.Spec.Metadata = &v1beta1.Metadata{ @@ -774,19 +798,19 @@ type: ClusterIP assert.NilError(t, err) // Annotations present 
-		assert.Assert(t, marshalMatches(service.ObjectMeta.Annotations, `
+		assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta.Annotations, `
some: note
 		`))

 		// Labels present in the metadata.
-		assert.Assert(t, marshalMatches(service.ObjectMeta.Labels, `
+		assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta.Labels, `
happy: label
postgres-operator.crunchydata.com/cluster: pg2
postgres-operator.crunchydata.com/role: replica
 		`))

 		// Labels not in the selector.
-		assert.Assert(t, marshalMatches(service.Spec.Selector, `
+		assert.Assert(t, cmp.MarshalMatches(service.Spec.Selector, `
postgres-operator.crunchydata.com/cluster: pg2
postgres-operator.crunchydata.com/role: replica
 		`))
diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go
index f7a9263481..d459d30a10 100644
--- a/internal/controller/postgrescluster/controller.go
+++ b/internal/controller/postgrescluster/controller.go
@@ -1,29 +1,16 @@
-package postgrescluster
-
-/*
-Copyright 2021 - 2023 Crunchy Data Solutions, Inc.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0

-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+package postgrescluster

 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
-	"os"
-	"strconv"
 	"time"

-	"github.com/pkg/errors"
 	"go.opentelemetry.io/otel/trace"
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"
@@ -31,19 +18,21 @@ import (
 	policyv1 "k8s.io/api/policy/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/tools/record"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-	"sigs.k8s.io/controller-runtime/pkg/source"

 	"github.com/crunchydata/postgres-operator/internal/config"
+	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
+	"github.com/crunchydata/postgres-operator/internal/initialize"
 	"github.com/crunchydata/postgres-operator/internal/logging"
 	"github.com/crunchydata/postgres-operator/internal/pgaudit"
 	"github.com/crunchydata/postgres-operator/internal/pgbackrest"
@@ -51,7 +40,7 @@ import (
 	"github.com/crunchydata/postgres-operator/internal/pgmonitor"
 	"github.com/crunchydata/postgres-operator/internal/pki"
 	"github.com/crunchydata/postgres-operator/internal/postgres"
-	"github.com/crunchydata/postgres-operator/internal/util"
+	"github.com/crunchydata/postgres-operator/internal/registration"
 	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )

@@ -62,18 +51,17 @@ const (

 // Reconciler holds resources for the PostgresCluster reconciler
 type Reconciler struct {
-	Client      client.Client
-	IsOpenShift bool
-	Owner       client.FieldOwner
-	PGOVersion  string
-	PodExec func(
-		namespace, pod, container string,
+	Client          client.Client
+	DiscoveryClient *discovery.DiscoveryClient
+	IsOpenShift     bool
+	Owner           client.FieldOwner
+	PodExec         func(
+		ctx context.Context, namespace, pod, container string,
 		stdin io.Reader, stdout, stderr io.Writer, command ...string,
 	) error
-	Recorder        record.EventRecorder
-	Registration    util.Registration
-	RegistrationURL string
-	Tracer          trace.Tracer
+	Recorder     record.EventRecorder
+	Registration registration.Registration
+	Tracer       trace.Tracer
 }

 // +kubebuilder:rbac:groups="",resources="events",verbs={create,patch}

@@ -88,15 +76,6 @@ func (r *Reconciler) Reconcile(
 	log := logging.FromContext(ctx)
 	defer span.End()

-	// create the result that will be updated following a call to each reconciler
-	result := reconcile.Result{}
-	updateResult := func(next reconcile.Result, err error) error {
-		if err == nil {
-			result = updateReconcileResult(result, next)
-		}
-		return err
-	}
-
 	// get the postgrescluster from the cache
 	cluster := &v1beta1.PostgresCluster{}
 	if err := r.Client.Get(ctx, request.NamespacedName, cluster); err != nil {
@@ -107,7 +86,7 @@ func (r *Reconciler) Reconcile(
 			log.Error(err, "unable to fetch PostgresCluster")
 			span.RecordError(err)
 		}
-		return result, err
+		return runtime.ErrorWithBackoff(err)
 	}

 	// Set any defaults that may not have been stored in the API. No DeepCopy
@@ -133,15 +112,10 @@ func (r *Reconciler) Reconcile(
 		if result, err := r.handleDelete(ctx, cluster); err != nil {
 			span.RecordError(err)
 			log.Error(err, "deleting")
-			return reconcile.Result{}, err
+			return runtime.ErrorWithBackoff(err)
 		} else if result != nil {
 			if log := log.V(1); log.Enabled() {
-				if result.RequeueAfter > 0 {
-					// RequeueAfter implies Requeue, but set both to make the next
-					// log message more clear.
-					result.Requeue = true
-				}
 				log.Info("deleting", "result", fmt.Sprintf("%+v", *result))
 			}
 			return *result, nil
@@ -158,9 +132,8 @@
 			err.Error())
 		// specifically allow reconciliation if the cluster is shutdown to
 		// facilitate upgrades, otherwise return
-		if cluster.Spec.Shutdown == nil ||
-			(cluster.Spec.Shutdown != nil && !*cluster.Spec.Shutdown) {
-			return result, err
+		if !initialize.FromPointer(cluster.Spec.Shutdown) {
+			return runtime.ErrorWithBackoff(err)
 		}
 	}

@@ -173,75 +146,50 @@
 		// this configuration and provide an event
 		path := field.NewPath("spec", "standby")
 		err := field.Invalid(path, cluster.Name, "Standby requires a host or repoName to be enabled")
-		r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidStandbyConfiguration",
-			err.Error())
-		return result, err
+		r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidStandbyConfiguration", err.Error())
+		return runtime.ErrorWithBackoff(err)
 	}

 	var (
-		clusterConfigMap         *corev1.ConfigMap
-		clusterReplicationSecret *corev1.Secret
-		clusterPodService        *corev1.Service
-		clusterVolumes           []corev1.PersistentVolumeClaim
-		instanceServiceAccount   *corev1.ServiceAccount
-		instances                *observedInstances
-		patroniLeaderService     *corev1.Service
-		primaryCertificate       *corev1.SecretProjection
-		primaryService           *corev1.Service
-		replicaService           *corev1.Service
-		rootCA                   *pki.RootCertificateAuthority
-		monitoringSecret         *corev1.Secret
-		exporterQueriesConfig    *corev1.ConfigMap
-		exporterWebConfig        *corev1.ConfigMap
-		err                      error
+		clusterConfigMap             *corev1.ConfigMap
+		clusterReplicationSecret     *corev1.Secret
+		clusterPodService            *corev1.Service
+		clusterVolumes               []corev1.PersistentVolumeClaim
+		instanceServiceAccount       *corev1.ServiceAccount
+		instances                    *observedInstances
+		patroniLeaderService         *corev1.Service
+		primaryCertificate           *corev1.SecretProjection
+		primaryService               *corev1.Service
+		replicaService               *corev1.Service
+		rootCA                       *pki.RootCertificateAuthority
+		monitoringSecret             *corev1.Secret
+		exporterQueriesConfig        *corev1.ConfigMap
+		exporterWebConfig            *corev1.ConfigMap
+		err                          error
+		backupsSpecFound             bool
+		backupsReconciliationAllowed bool
+		dedicatedSnapshotPVC         *corev1.PersistentVolumeClaim
 	)

-	// Define a function for updating PostgresCluster status. Returns any error that
-	// occurs while attempting to patch the status, while otherwise simply returning the
-	// Result and error variables that are populated while reconciling the PostgresCluster.
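// For context, a minimal sketch (not part of this patch) of the errors.Join
// pattern the new code below uses in place of the removed updateResult helper.
// errors.Join is in the Go standard library since 1.20: it returns nil when
// every argument is nil and otherwise wraps the non-nil errors, so a reconcile
// error and a status-patch error can be reported together:
//
//	err := errors.Join(reconcileErr, patchClusterStatus())
//	// err == nil only when both succeed; errors.Is(err, reconcileErr)
//	// remains true whenever reconcileErr != nil.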
-	patchClusterStatus := func() (reconcile.Result, error) {
+	patchClusterStatus := func() error {
 		if !equality.Semantic.DeepEqual(before.Status, cluster.Status) {
 			// NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track
 			// managed fields on the status subresource: https://issue.k8s.io/88901
-			if err := errors.WithStack(r.Client.Status().Patch(
-				ctx, cluster, client.MergeFrom(before), r.Owner)); err != nil {
+			if err := r.Client.Status().Patch(
+				ctx, cluster, client.MergeFrom(before), r.Owner); err != nil {
 				log.Error(err, "patching cluster status")
-				return result, err
+				return err
 			}
 			log.V(1).Info("patched cluster status")
 		}
-		return result, err
+		return nil
 	}

-	if config.RegistrationRequired() && !r.registrationValid() {
-		if !registrationRequiredStatusFound(cluster) {
-			addRegistrationRequiredStatus(cluster, r.PGOVersion)
-			return patchClusterStatus()
-		}
-
-		if r.tokenAuthenticationFailed() {
-			r.Recorder.Event(cluster, corev1.EventTypeWarning, "Token Authentication Failed", "See "+r.RegistrationURL+" for details.")
-		}
-
-		if shouldEncumberReconciliation(r.Registration.Authenticated, cluster, r.PGOVersion) {
-			emitEncumbranceWarning(cluster, r)
-			// Encumbrance is just an early return from the reconciliation loop.
-			return patchClusterStatus()
-		} else {
-			emitAdvanceWarning(cluster, r)
-		}
-	}
-
-	if config.RegistrationRequired() && r.registrationValid() {
-		if tokenRequiredConditionFound(cluster) {
-			meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.TokenRequired)
-		}
-
-		if registrationRequiredStatusFound(cluster) {
-			cluster.Status.RegistrationRequired = nil
-			r.Recorder.Event(cluster, corev1.EventTypeNormal, "Token Verified", "Thank you for registering your installation of Crunchy Postgres for Kubernetes.")
-		}
+	if r.Registration != nil && r.Registration.Required(r.Recorder, cluster, &cluster.Status.Conditions) {
+		registration.SetAdvanceWarning(r.Recorder, cluster, &cluster.Status.Conditions)
 	}
+	cluster.Status.RegistrationRequired = nil
+	cluster.Status.TokenRequired = ""

 	// if the cluster is paused, set a condition and return
 	if cluster.Spec.Paused != nil && *cluster.Spec.Paused {
@@ -253,18 +201,39 @@ func (r *Reconciler) Reconcile(
 			ObservedGeneration: cluster.GetGeneration(),
 		})
-		return patchClusterStatus()
+		return runtime.ErrorWithBackoff(patchClusterStatus())
 	} else {
 		meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.PostgresClusterProgressing)
 	}

+	if err == nil {
+		backupsSpecFound, backupsReconciliationAllowed, err = r.BackupsEnabled(ctx, cluster)
+
+		// If we cannot reconcile because the backup reconciliation is paused, set a condition and exit
+		if !backupsReconciliationAllowed {
+			meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+				Type:   v1beta1.PostgresClusterProgressing,
+				Status: metav1.ConditionFalse,
+				Reason: "Paused",
+				Message: "Reconciliation is paused: please fill in spec.backups " +
+					"or add the postgres-operator.crunchydata.com/authorizeBackupRemoval " +
+					"annotation to authorize backup removal.",
+
+				ObservedGeneration: cluster.GetGeneration(),
+			})
+			return runtime.ErrorWithBackoff(patchClusterStatus())
+		} else {
+			meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.PostgresClusterProgressing)
+		}
+	}
+
 	pgHBAs := postgres.NewHBAs()
 	pgmonitor.PostgreSQLHBAs(cluster, &pgHBAs)
 	pgbouncer.PostgreSQL(cluster, &pgHBAs)

 	pgParameters := postgres.NewParameters()
 	pgaudit.PostgreSQLParameters(&pgParameters)
-	pgbackrest.PostgreSQL(cluster, &pgParameters)
+	pgbackrest.PostgreSQL(cluster, &pgParameters, backupsSpecFound)
 	pgmonitor.PostgreSQLParameters(cluster, &pgParameters)

 	// Set huge_pages = try if a hugepages resource limit > 0, otherwise set "off"
@@ -281,10 +250,9 @@ func (r *Reconciler) Reconcile(
 		// return a bool indicating that the controller should return early while any
 		// required Jobs are running, after which it will indicate that an early
 		// return is no longer needed, and reconciliation can proceed normally.
-		var returnEarly bool
-		returnEarly, err = r.reconcileDirMoveJobs(ctx, cluster)
+		returnEarly, err := r.reconcileDirMoveJobs(ctx, cluster)
 		if err != nil || returnEarly {
-			return patchClusterStatus()
+			return runtime.ErrorWithBackoff(errors.Join(err, patchClusterStatus()))
 		}
 	}
 	if err == nil {
@@ -296,8 +264,14 @@
 	if err == nil {
 		instances, err = r.observeInstances(ctx, cluster)
 	}
+
+	result := reconcile.Result{}
+
 	if err == nil {
-		err = updateResult(r.reconcilePatroniStatus(ctx, cluster, instances))
+		var requeue time.Duration
+		if requeue, err = r.reconcilePatroniStatus(ctx, cluster, instances); err == nil && requeue > 0 {
+			result.RequeueAfter = requeue
+		}
 	}
 	if err == nil {
 		err = r.reconcilePatroniSwitchover(ctx, cluster, instances)
@@ -326,10 +300,9 @@ func (r *Reconciler) Reconcile(
 		// the controller should return early while data initialization is in progress, after
 		// which it will indicate that an early return is no longer needed, and reconciliation
 		// can proceed normally.
-		var returnEarly bool
-		returnEarly, err = r.reconcileDataSource(ctx, cluster, instances, clusterVolumes, rootCA)
+		returnEarly, err := r.reconcileDataSource(ctx, cluster, instances, clusterVolumes, rootCA, backupsSpecFound)
 		if err != nil || returnEarly {
-			return patchClusterStatus()
+			return runtime.ErrorWithBackoff(errors.Join(err, patchClusterStatus()))
 		}
 	}
 	if err == nil {
@@ -369,7 +342,9 @@ func (r *Reconciler) Reconcile(
 		err = r.reconcileInstanceSets(
 			ctx, cluster, clusterConfigMap, clusterReplicationSecret, rootCA,
 			clusterPodService, instanceServiceAccount, instances, patroniLeaderService,
-			primaryCertificate, clusterVolumes, exporterQueriesConfig, exporterWebConfig)
+			primaryCertificate, clusterVolumes, exporterQueriesConfig, exporterWebConfig,
+			backupsSpecFound,
+		)
 	}

 	if err == nil {
@@ -380,7 +355,20 @@ func (r *Reconciler) Reconcile(
 	}

 	if err == nil {
-		err = updateResult(r.reconcilePGBackRest(ctx, cluster, instances, rootCA))
+		var next reconcile.Result
+		if next, err = r.reconcilePGBackRest(ctx, cluster,
+			instances, rootCA, backupsSpecFound); err == nil && !next.IsZero() {
+			result.Requeue = result.Requeue || next.Requeue
+			if next.RequeueAfter > 0 {
+				result.RequeueAfter = next.RequeueAfter
+			}
+		}
+	}
+	if err == nil {
+		dedicatedSnapshotPVC, err = r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes)
+	}
+	if err == nil {
+		err = r.reconcileVolumeSnapshots(ctx, cluster, dedicatedSnapshotPVC)
 	}
 	if err == nil {
 		err = r.reconcilePGBouncer(ctx, cluster, instances, primaryCertificate, rootCA)
@@ -406,21 +394,7 @@ func (r *Reconciler) Reconcile(

 	log.V(1).Info("reconciled cluster")

-	return patchClusterStatus()
-}
-
-func (r *Reconciler) tokenAuthenticationFailed() bool {
-	return r.Registration.TokenFileFound && r.Registration.Authenticated
-}
-
-func (r *Reconciler) registrationValid() bool {
-	expiry := r.Registration.Exp
-	authenticated := r.Registration.Authenticated
-	// Use epoch time in seconds, consistent with RFC 7519.
-	now := time.Now().Unix()
-	expired := expiry < now
-
-	return authenticated && !expired
+	return result, errors.Join(err, patchClusterStatus())
 }

 // deleteControlled safely deletes object when it is controlled by cluster.
@@ -492,30 +466,22 @@ func (r *Reconciler) setOwnerReference(
 func (r *Reconciler) SetupWithManager(mgr manager.Manager) error {
 	if r.PodExec == nil {
 		var err error
-		r.PodExec, err = newPodExecutor(mgr.GetConfig())
+		r.PodExec, err = runtime.NewPodExecutor(mgr.GetConfig())
 		if err != nil {
 			return err
 		}
 	}

-	var opts controller.Options
-
-	// TODO(cbandy): Move this to main with controller-runtime v0.9+
-	// - https://github.com/kubernetes-sigs/controller-runtime/commit/82fc2564cf
-	if s := os.Getenv("PGO_WORKERS"); s != "" {
-		if i, err := strconv.Atoi(s); err == nil && i > 0 {
-			opts.MaxConcurrentReconciles = i
-		} else {
-			mgr.GetLogger().Error(err, "PGO_WORKERS must be a positive number")
+	if r.DiscoveryClient == nil {
+		var err error
+		r.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
+		if err != nil {
+			return err
 		}
 	}
-	if opts.MaxConcurrentReconciles == 0 {
-		opts.MaxConcurrentReconciles = 2
-	}

 	return builder.ControllerManagedBy(mgr).
 		For(&v1beta1.PostgresCluster{}).
-		WithOptions(opts).
 		Owns(&corev1.ConfigMap{}).
 		Owns(&corev1.Endpoints{}).
 		Owns(&corev1.PersistentVolumeClaim{}).
@@ -529,8 +495,33 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error {
 		Owns(&rbacv1.RoleBinding{}).
 		Owns(&batchv1.CronJob{}).
 		Owns(&policyv1.PodDisruptionBudget{}).
-		Watches(&source.Kind{Type: &corev1.Pod{}}, r.watchPods()).
-		Watches(&source.Kind{Type: &appsv1.StatefulSet{}},
+		Watches(&corev1.Pod{}, r.watchPods()).
+		Watches(&appsv1.StatefulSet{},
 			r.controllerRefHandlerFuncs()). // watch all StatefulSets
 		Complete(r)
 }
+
+// GroupVersionKindExists checks to see whether a given Kind for a given
+// GroupVersion exists in the Kubernetes API Server.
+func (r *Reconciler) GroupVersionKindExists(groupVersion, kind string) (*bool, error) {
+	if r.DiscoveryClient == nil {
+		return initialize.Bool(false), nil
+	}
+
+	resourceList, err := r.DiscoveryClient.ServerResourcesForGroupVersion(groupVersion)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return initialize.Bool(false), nil
+		}
+
+		return nil, err
+	}
+
+	for _, resource := range resourceList.APIResources {
+		if resource.Kind == kind {
+			return initialize.Bool(true), nil
+		}
+	}
+
+	return initialize.Bool(false), nil
+}
diff --git a/internal/controller/postgrescluster/controller_ref_manager.go b/internal/controller/postgrescluster/controller_ref_manager.go
index 83ad72218c..8c4a34189f 100644
--- a/internal/controller/postgrescluster/controller_ref_manager.go
+++ b/internal/controller/postgrescluster/controller_ref_manager.go
@@ -1,19 +1,8 @@
-package postgrescluster
-
-/*
-Copyright 2021 - 2023 Crunchy Data Solutions, Inc.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0

-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+package postgrescluster

 import (
 	"context"
@@ -192,23 +181,21 @@ func (r *Reconciler) releaseObject(ctx context.Context,
 // StatefulSets within the cluster as needed to manage controller ownership refs.
 func (r *Reconciler) controllerRefHandlerFuncs() *handler.Funcs {

-	// var err error
-	ctx := context.Background()
-	log := logging.FromContext(ctx)
+	log := logging.FromContext(context.Background())
 	errMsg := "managing StatefulSet controller refs"

 	return &handler.Funcs{
-		CreateFunc: func(updateEvent event.CreateEvent, workQueue workqueue.RateLimitingInterface) {
+		CreateFunc: func(ctx context.Context, updateEvent event.CreateEvent, workQueue workqueue.RateLimitingInterface) {
 			if err := r.manageControllerRefs(ctx, updateEvent.Object); err != nil {
 				log.Error(err, errMsg)
 			}
 		},
-		UpdateFunc: func(updateEvent event.UpdateEvent, workQueue workqueue.RateLimitingInterface) {
+		UpdateFunc: func(ctx context.Context, updateEvent event.UpdateEvent, workQueue workqueue.RateLimitingInterface) {
 			if err := r.manageControllerRefs(ctx, updateEvent.ObjectNew); err != nil {
 				log.Error(err, errMsg)
 			}
 		},
-		DeleteFunc: func(updateEvent event.DeleteEvent, workQueue workqueue.RateLimitingInterface) {
+		DeleteFunc: func(ctx context.Context, updateEvent event.DeleteEvent, workQueue workqueue.RateLimitingInterface) {
 			if err := r.manageControllerRefs(ctx, updateEvent.Object); err != nil {
 				log.Error(err, errMsg)
 			}
diff --git a/internal/controller/postgrescluster/controller_ref_manager_test.go b/internal/controller/postgrescluster/controller_ref_manager_test.go
index 00f6a2680f..8543fe390d 100644
--- a/internal/controller/postgrescluster/controller_ref_manager_test.go
+++ b/internal/controller/postgrescluster/controller_ref_manager_test.go
@@ -1,23 +1,9 @@
-//go:build envtest
-// +build envtest
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0

 package postgrescluster

-/*
- Copyright 2021 - 2023 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
 import (
 	"context"
 	"testing"
@@ -67,7 +53,7 @@ func TestManageControllerRefs(t *testing.T) {
 	t.Run("adopt Object", func(t *testing.T) {

 		obj := objBase.DeepCopy()
-		obj.Name = "adpot"
+		obj.Name = "adopt"
 		obj.Labels = map[string]string{naming.LabelCluster: clusterName}

 		if err := r.Client.Create(ctx, obj); err != nil {
@@ -155,7 +141,7 @@ func TestManageControllerRefs(t *testing.T) {

 		obj := objBase.DeepCopy()
 		obj.Name = "ignore-no-postgrescluster"
-		obj.Labels = map[string]string{naming.LabelCluster: "noexist"}
+		obj.Labels = map[string]string{naming.LabelCluster: "nonexistent"}

 		if err := r.Client.Create(ctx, obj); err != nil {
 			t.Error(err)
diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go
index da33c9d991..e6fdc5cb86 100644
--- a/internal/controller/postgrescluster/controller_test.go
+++ b/internal/controller/postgrescluster/controller_test.go
@@ -1,27 +1,12 @@
-//go:build envtest
-// +build envtest
-
-/*
- Copyright 2021 - 2023 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0

 package postgrescluster

 import (
 	"context"
 	"fmt"
-	"os"
 	"strings"
 	"testing"

@@ -35,6 +20,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/version"
@@ -43,11 +29,9 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/yaml"

-	"github.com/crunchydata/postgres-operator/internal/config"
-	"github.com/crunchydata/postgres-operator/internal/logging"
 	"github.com/crunchydata/postgres-operator/internal/naming"
+	"github.com/crunchydata/postgres-operator/internal/registration"
 	"github.com/crunchydata/postgres-operator/internal/testing/require"
-	"github.com/crunchydata/postgres-operator/internal/util"
 	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )

@@ -147,15 +131,13 @@ var _ = Describe("PostgresCluster Reconciler", func() {
 		test.Namespace.Name = "postgres-operator-test-" + rand.String(6)
 		Expect(suite.Client.Create(ctx, test.Namespace)).To(Succeed())

-		// Initialize the feature gate
-		Expect(util.AddAndSetFeatureGates("")).To(Succeed())
-
 		test.Recorder = record.NewFakeRecorder(100)
 		test.Recorder.IncludeObject = true

 		test.Reconciler.Client = suite.Client
 		test.Reconciler.Owner = "asdf"
 		test.Reconciler.Recorder = test.Recorder
+		test.Reconciler.Registration = nil
 		test.Reconciler.Tracer = otel.Tracer("asdf")
 	})

@@ -196,112 +178,15 @@ var _ = Describe("PostgresCluster Reconciler", func() {
 		return result
 	}

-	Context("New Unregistered Cluster with Registration Requirement, no Token, no need to Encumber", func() {
-		var cluster *v1beta1.PostgresCluster
-
-		BeforeEach(func() {
-			ctx := context.Background()
-			rsaKey, _ := os.ReadFile("../../../cpk_rsa_key.pub")
-			test.Reconciler.Registration = util.GetRegistration(string(rsaKey), "", logging.FromContext(ctx))
-			test.Reconciler.PGOVersion = "v5.4.2"
-
-			// REGISTRATION_REQUIRED will be set by OLM installers.
-			os.Setenv("REGISTRATION_REQUIRED", "true")
-			cluster = create(olmClusterYAML)
-			Expect(reconcile(cluster)).To(BeZero())
-		})
-
-		AfterEach(func() {
-			ctx := context.Background()
-
-			if cluster != nil {
-				Expect(client.IgnoreNotFound(
-					suite.Client.Delete(ctx, cluster),
-				)).To(Succeed())
-
-				// Remove finalizers, if any, so the namespace can terminate.
-				Expect(client.IgnoreNotFound(
-					suite.Client.Patch(ctx, cluster, client.RawPatch(
-						client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))),
-				)).To(Succeed())
-			}
-			os.Unsetenv("REGISTRATION_REQUIRED")
-		})
-
-		Specify("Cluster RegistrationRequired Status", func() {
-			existing := &v1beta1.PostgresCluster{}
-			Expect(suite.Client.Get(
-				context.Background(), client.ObjectKeyFromObject(cluster), existing,
-			)).To(Succeed())
-
-			registrationRequired := config.RegistrationRequired()
-			Expect(registrationRequired).To(BeTrue())
-
-			pgoVersion := existing.Status.RegistrationRequired.PGOVersion
-			Expect(pgoVersion).To(Equal("v5.4.2"))
-
-			shouldEncumber := shouldEncumberReconciliation(test.Reconciler.Registration.Authenticated, existing, test.Reconciler.PGOVersion)
-			Expect(shouldEncumber).To(BeFalse())
-		})
-	})
-
-	Context("Cluster with Registration Requirement and an invalid token, must Encumber", func() {
+	Context("Cluster with Registration Requirement, no token", func() {
 		var cluster *v1beta1.PostgresCluster

 		BeforeEach(func() {
-			test.Reconciler.PGOVersion = "v5.4.3"
-			// REGISTRATION_REQUIRED will be set by an OLM installer.
-			os.Setenv("REGISTRATION_REQUIRED", "true")
-			ctx := context.Background()
-			rsaKey, _ := os.ReadFile("../../../cpk_rsa_key.pub")
-			test.Reconciler.Registration = util.GetRegistration(string(rsaKey), "../../testing/invalid_token", logging.FromContext(ctx))
-			cluster = create(olmClusterYAML)
-			Expect(reconcile(cluster)).To(BeZero())
-		})
-
-		AfterEach(func() {
-			ctx := context.Background()
-
-			if cluster != nil {
-				Expect(client.IgnoreNotFound(
-					suite.Client.Delete(ctx, cluster),
-				)).To(Succeed())
-
-				// Remove finalizers, if any, so the namespace can terminate.
-				Expect(client.IgnoreNotFound(
-					suite.Client.Patch(ctx, cluster, client.RawPatch(
-						client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))),
-				)).To(Succeed())
-			}
-			os.Unsetenv("REGISTRATION_REQUIRED")
-		})
-
-		Specify("Cluster RegistrationRequired Status", func() {
-			existing := &v1beta1.PostgresCluster{}
-			Expect(suite.Client.Get(
-				context.Background(), client.ObjectKeyFromObject(cluster), existing,
-			)).To(Succeed())
+			test.Reconciler.Registration = registration.RegistrationFunc(
+				func(record.EventRecorder, client.Object, *[]metav1.Condition) bool {
+					return true
+				})

-			reg := test.Reconciler.Registration
-			Expect(reg.TokenFileFound).To(BeTrue())
-			Expect(reg.Authenticated).To(BeFalse())
-			// Simulate an upgrade of the operator by bumping the Reconciler PGOVersion.
-			shouldEncumber := shouldEncumberReconciliation(reg.Authenticated, existing, "v5.4.4")
-			Expect(shouldEncumber).To(BeTrue())
-		})
-	})
-
-	Context("Old Unregistered Cluster with Registration Requirement, need to Encumber", func() {
-		var cluster *v1beta1.PostgresCluster
-
-		BeforeEach(func() {
-			test.Reconciler.PGOVersion = "v5.4.3"
-			// REGISTRATION_REQUIRED will be set by OLM installers.
-			os.Setenv("REGISTRATION_REQUIRED", "true")
-			ctx := context.Background()
-			rsaKey, _ := os.ReadFile("../../../cpk_rsa_key.pub")
-			test.Reconciler.Registration = util.GetRegistration(string(rsaKey), "", logging.FromContext(ctx))
-			test.Reconciler.PGOVersion = "v5.4.3"
 			cluster = create(olmClusterYAML)
 			Expect(reconcile(cluster)).To(BeZero())
 		})
@@ -320,7 +205,6 @@ var _ = Describe("PostgresCluster Reconciler", func() {
 						client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))),
 				)).To(Succeed())
 			}
-			os.Unsetenv("REGISTRATION_REQUIRED")
 		})

 		Specify("Cluster RegistrationRequired Status", func() {
@@ -329,73 +213,11 @@ var _ = Describe("PostgresCluster Reconciler", func() {
 				context.Background(), client.ObjectKeyFromObject(cluster), existing,
 			)).To(Succeed())

-			reg := test.Reconciler.Registration
-			Expect(reg.TokenFileFound).To(BeFalse())
-			Expect(reg.Authenticated).To(BeFalse())
-
-			// Simulate an upgrade of the operator.
-			shouldEncumber := shouldEncumberReconciliation(reg.Authenticated, existing, "v5.4.4")
-			Expect(shouldEncumber).To(BeTrue())
-		})
-	})
-
-	Context("New Registered Cluster with Registration Requirement, no need to Encumber", func() {
-		var cluster *v1beta1.PostgresCluster
-
-		BeforeEach(func() {
-			test.Reconciler.PGOVersion = "v5.4.2"
-			// REGISTRATION_REQUIRED will be set by OLM installers.
-			os.Setenv("REGISTRATION_REQUIRED", "true")
-
-			ctx := context.Background()
-			rsaKey, _ := os.ReadFile("../../../cpk_rsa_key.pub")
-			test.Reconciler.Registration = util.GetRegistration(string(rsaKey), "../../testing/cpk_token", logging.FromContext(ctx))
-			test.Reconciler.PGOVersion = "v5.4.3"
-
-			cluster = create(olmClusterYAML)
-			Expect(reconcile(cluster)).To(BeZero())
-		})
-
-		AfterEach(func() {
-			ctx := context.Background()
-
-			if cluster != nil {
-				Expect(client.IgnoreNotFound(
-					suite.Client.Delete(ctx, cluster),
-				)).To(Succeed())
-
-				// Remove finalizers, if any, so the namespace can terminate.
-				Expect(client.IgnoreNotFound(
-					suite.Client.Patch(ctx, cluster, client.RawPatch(
-						client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))),
-				)).To(Succeed())
-			}
-			os.Unsetenv("REGISTRATION_REQUIRED")
-		})
-
-		Specify("Cluster RegistrationRequired Status", func() {
-			existing := &v1beta1.PostgresCluster{}
-			Expect(suite.Client.Get(
-				context.Background(), client.ObjectKeyFromObject(cluster), existing,
-			)).To(Succeed())
+			Expect(meta.IsStatusConditionFalse(existing.Status.Conditions, v1beta1.Registered)).To(BeTrue())

-			registrationRequired := config.RegistrationRequired()
-			Expect(registrationRequired).To(BeTrue())
-
-			registrationRequiredStatus := existing.Status.RegistrationRequired
-			Expect(registrationRequiredStatus).To(BeNil())
-
-			reg := test.Reconciler.Registration
-			shouldEncumber := shouldEncumberReconciliation(reg.Authenticated, existing, "v5.4.2")
-			Expect(shouldEncumber).To(BeFalse())
-			Expect(reg.TokenFileFound).To(BeTrue())
-			Expect(reg.Authenticated).To(BeTrue())
-			Expect(reg.Aud).To(Equal("CPK"))
-			Expect(reg.Sub).To(Equal("point.of.contact@company.com"))
-			Expect(reg.Iss).To(Equal("Crunchy Data"))
-			Expect(reg.Exp).To(Equal(int64(1727451935)))
-			Expect(reg.Nbf).To(Equal(int64(1516239022)))
-			Expect(reg.Iat).To(Equal(int64(1516239022)))
+			event, ok := <-test.Recorder.Events
+			Expect(ok).To(BeTrue())
+			Expect(event).To(ContainSubstring("Register Soon"))
 		})
 	})

diff --git a/internal/controller/postgrescluster/delete.go b/internal/controller/postgrescluster/delete.go
index 871f85181e..63fc007f40 100644
--- a/internal/controller/postgrescluster/delete.go
+++ b/internal/controller/postgrescluster/delete.go
@@ -1,17 +1,6 @@
-/*
- Copyright 2021 - 2023 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0

 package postgrescluster

diff --git a/internal/controller/postgrescluster/helpers_test.go b/internal/controller/postgrescluster/helpers_test.go
index f78e909d16..0536b466d4 100644
--- a/internal/controller/postgrescluster/helpers_test.go
+++ b/internal/controller/postgrescluster/helpers_test.go
@@ -1,42 +1,29 @@
-/*
- Copyright 2021 - 2023 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0

 package postgrescluster

 import (
 	"context"
 	"os"
-	"path/filepath"
 	"strconv"
-	"sync"
 	"testing"
 	"time"

-	"gotest.tools/v3/assert"
+	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/rest"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/envtest"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/yaml"

 	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
 	"github.com/crunchydata/postgres-operator/internal/initialize"
-	"github.com/crunchydata/postgres-operator/internal/testing/cmp"
+	"github.com/crunchydata/postgres-operator/internal/naming"
+	"github.com/crunchydata/postgres-operator/internal/testing/require"
 	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )

@@ -66,109 +53,55 @@ func init() {
 	}
 }

-// marshalMatches converts actual to YAML and compares that to expected.
-func marshalMatches(actual interface{}, expected string) cmp.Comparison {
-	return cmp.MarshalMatches(actual, expected)
-}
-
-var kubernetes struct {
-	sync.Mutex
-
-	env   *envtest.Environment
-	count int
-}
-
 // setupKubernetes starts or connects to a Kubernetes API and returns a client
-// that uses it. When starting a local API, the client is a member of the
-// "system:masters" group. It also creates any CRDs present in the
-// "/config/crd/bases" directory. When any of these fail, it calls t.Fatal.
-// It deletes CRDs and stops the local API using t.Cleanup.
-func setupKubernetes(t testing.TB) (*envtest.Environment, client.Client) {
+// that uses it. See [require.Kubernetes] for more details.
+func setupKubernetes(t testing.TB) (*rest.Config, client.Client) {
 	t.Helper()

-	kubernetes.Lock()
-	defer kubernetes.Unlock()
-
-	if kubernetes.env == nil {
-		env := &envtest.Environment{
-			CRDDirectoryPaths: []string{
-				filepath.Join("..", "..", "..", "config", "crd", "bases"),
-			},
-		}
-
-		_, err := env.Start()
-		assert.NilError(t, err)
-
-		kubernetes.env = env
-	}
-
-	kubernetes.count++
+	// Start and/or connect to a Kubernetes API, or Skip when that's not configured.
+	cfg, cc := require.Kubernetes2(t)

+	// Log the status of any test namespaces after this test fails.
 	t.Cleanup(func() {
-		kubernetes.Lock()
-		defer kubernetes.Unlock()
-
 		if t.Failed() {
-			if cc, err := client.New(kubernetes.env.Config, client.Options{}); err == nil {
-				var namespaces corev1.NamespaceList
-				_ = cc.List(context.Background(), &namespaces, client.HasLabels{"postgres-operator-test"})
+			var namespaces corev1.NamespaceList
+			_ = cc.List(context.Background(), &namespaces, client.HasLabels{"postgres-operator-test"})

-				type shaped map[string]corev1.NamespaceStatus
-				result := make([]shaped, len(namespaces.Items))
+			type shaped map[string]corev1.NamespaceStatus
+			result := make([]shaped, len(namespaces.Items))

-				for i, ns := range namespaces.Items {
-					result[i] = shaped{ns.Labels["postgres-operator-test"]: ns.Status}
-				}
-
-				formatted, _ := yaml.Marshal(result)
-				t.Logf("Test Namespaces:\n%s", formatted)
+			for i, ns := range namespaces.Items {
+				result[i] = shaped{ns.Labels["postgres-operator-test"]: ns.Status}
 			}
-		}
-
-		kubernetes.count--
-		if kubernetes.count == 0 {
-			assert.Check(t, kubernetes.env.Stop())
-			kubernetes.env = nil
+			formatted, _ := yaml.Marshal(result)
+			t.Logf("Test Namespaces:\n%s", formatted)
 		}
 	})

-	scheme, err := runtime.CreatePostgresOperatorScheme()
-	assert.NilError(t, err)
-
-	client, err := client.New(kubernetes.env.Config, client.Options{Scheme: scheme})
-	assert.NilError(t, err)
-
-	return kubernetes.env, client
+	return cfg, cc
 }

 // setupNamespace creates a random namespace that will be deleted by t.Cleanup.
-// When creation fails, it calls t.Fatal. The caller may delete the namespace
-// at any time.
+//
+// Deprecated: Use [require.Namespace] instead.
 func setupNamespace(t testing.TB, cc client.Client) *corev1.Namespace {
 	t.Helper()
-	ns := &corev1.Namespace{}
-	ns.GenerateName = "postgres-operator-test-"
-	ns.Labels = map[string]string{"postgres-operator-test": t.Name()}
-
-	ctx := context.Background()
-	assert.NilError(t, cc.Create(ctx, ns))
-	t.Cleanup(func() { assert.Check(t, client.IgnoreNotFound(cc.Delete(ctx, ns))) })
-
-	return ns
+	return require.Namespace(t, cc)
 }

 func testVolumeClaimSpec() corev1.PersistentVolumeClaimSpec {
 	// Defines a volume claim spec that can be used to create instances
 	return corev1.PersistentVolumeClaimSpec{
 		AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
-		Resources: corev1.ResourceRequirements{
+		Resources: corev1.VolumeResourceRequirements{
 			Requests: map[corev1.ResourceName]resource.Quantity{
 				corev1.ResourceStorage: resource.MustParse("1Gi"),
 			},
 		},
 	}
 }
+
 func testCluster() *v1beta1.PostgresCluster {
 	// Defines a base cluster spec that can be used by tests to generate a
 	// cluster with an expected number of instances
@@ -208,18 +141,75 @@ func testCluster() *v1beta1.PostgresCluster {
 	return cluster.DeepCopy()
 }

+func testBackupJob(cluster *v1beta1.PostgresCluster) *batchv1.Job {
+	job := batchv1.Job{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: batchv1.SchemeGroupVersion.String(),
+			Kind:       "Job",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "backup-job-1",
+			Namespace: cluster.Namespace,
+			Labels: map[string]string{
+				naming.LabelCluster:          cluster.Name,
+				naming.LabelPGBackRestBackup: "",
+				naming.LabelPGBackRestRepo:   "repo1",
+			},
+		},
+		Spec: batchv1.JobSpec{
+			Template: corev1.PodTemplateSpec{
+				Spec: corev1.PodSpec{
+					Containers:    []corev1.Container{{Name: "test", Image: "test"}},
+					RestartPolicy: corev1.RestartPolicyNever,
+				},
+			},
+		},
+	}
+
+	return job.DeepCopy()
+}
+
+func testRestoreJob(cluster *v1beta1.PostgresCluster) *batchv1.Job {
+	job := batchv1.Job{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: batchv1.SchemeGroupVersion.String(),
+			Kind:       "Job",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "restore-job-1",
+			Namespace: cluster.Namespace,
+			Labels:    naming.PGBackRestRestoreJobLabels(cluster.Name),
+		},
+		Spec: batchv1.JobSpec{
+			Template: corev1.PodTemplateSpec{
+				Spec: corev1.PodSpec{
+					Containers:    []corev1.Container{{Name: "test", Image: "test"}},
+					RestartPolicy: corev1.RestartPolicyNever,
+				},
+			},
+		},
+	}
+
+	return job.DeepCopy()
+}
+
 // setupManager creates the runtime manager used during controller testing
 func setupManager(t *testing.T, cfg *rest.Config,
-	contollerSetup func(mgr manager.Manager)) (context.Context, context.CancelFunc) {
+	controllerSetup func(mgr manager.Manager)) (context.Context, context.CancelFunc) {
+	ctx, cancel := context.WithCancel(context.Background())

-	mgr, err := runtime.CreateRuntimeManager("", cfg, true)
+	// Disable health endpoints
+	options := runtime.Options{}
+	options.HealthProbeBindAddress = "0"
+	options.Metrics.BindAddress = "0"
+
+	mgr, err := runtime.NewManager(cfg, options)
 	if err != nil {
 		t.Fatal(err)
 	}

-	contollerSetup(mgr)
+	controllerSetup(mgr)

-	ctx, cancel := context.WithCancel(context.Background())
 	go func() {
 		if err := mgr.Start(ctx); err != nil {
 			t.Error(err)
diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go
index 7b6c8530cc..66321cc738 100644
--- a/internal/controller/postgrescluster/instance.go
+++ b/internal/controller/postgrescluster/instance.go
@@ -1,17 +1,6 @@
-/*
- Copyright 2021 - 2023 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0

 package postgrescluster

@@ -29,6 +18,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	policyv1 "k8s.io/api/policy/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -38,6 +28,8 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"

 	"github.com/crunchydata/postgres-operator/internal/config"
+	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
+	"github.com/crunchydata/postgres-operator/internal/feature"
 	"github.com/crunchydata/postgres-operator/internal/initialize"
 	"github.com/crunchydata/postgres-operator/internal/logging"
 	"github.com/crunchydata/postgres-operator/internal/naming"
@@ -209,7 +201,7 @@ type observedInstances struct {
 	byName     map[string]*Instance
 	bySet      map[string][]*Instance
 	forCluster []*Instance
-	setNames   sets.String
+	setNames   sets.Set[string]
 }

 // newObservedInstances builds an observedInstances from Kubernetes API objects.
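// For reference, a minimal sketch (not part of this patch) of the generic sets
// API that replaces sets.String above, assuming k8s.io/apimachinery v0.26+
// where sets.Set[T] and the package-level sets.New and sets.List helpers exist:
//
//	names := sets.New[string]("red", "blue") // typed constructor replaces sets.NewString
//	names.Insert("green")                    // Insert is unchanged
//	sorted := sets.List(names)               // replaces the old names.List() method;
//	                                         // returns a sorted []string{"blue", "green", "red"}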
@@ -221,7 +213,7 @@ func newObservedInstances(
 	observed := observedInstances{
 		byName:   make(map[string]*Instance),
 		bySet:    make(map[string][]*Instance),
-		setNames: make(sets.String),
+		setNames: make(sets.Set[string]),
 	}

 	sets := make(map[string]*v1beta1.PostgresInstanceSetSpec)
@@ -302,6 +294,8 @@ func (r *Reconciler) observeInstances(
 	pods := &corev1.PodList{}
 	runners := &appsv1.StatefulSetList{}

+	autogrow := feature.Enabled(ctx, feature.AutoGrowVolumes)
+
 	selector, err := naming.AsSelector(naming.ClusterInstances(cluster.Name))
 	if err == nil {
 		err = errors.WithStack(
@@ -320,13 +314,28 @@ func (r *Reconciler) observeInstances(

 	observed := newObservedInstances(cluster, runners.Items, pods.Items)

+	// Save desired volume size values in case the status is removed.
+	// This may happen in cases where the Pod is restarted, the cluster
+	// is shutdown, etc. Only save values for instances defined in the spec.
+	previousDesiredRequests := make(map[string]string)
+	if autogrow {
+		for _, statusIS := range cluster.Status.InstanceSets {
+			if statusIS.DesiredPGDataVolume != nil {
+				for k, v := range statusIS.DesiredPGDataVolume {
+					previousDesiredRequests[k] = v
+				}
+			}
+		}
+	}
+
 	// Fill out status sorted by set name.
 	cluster.Status.InstanceSets = cluster.Status.InstanceSets[:0]
-	for _, name := range observed.setNames.List() {
+	for _, name := range sets.List(observed.setNames) {
 		status := v1beta1.PostgresInstanceSetStatus{Name: name}
+		status.DesiredPGDataVolume = make(map[string]string)

 		for _, instance := range observed.bySet[name] {
-			status.Replicas += int32(len(instance.Pods))
+			status.Replicas += int32(len(instance.Pods)) //nolint:gosec

 			if ready, known := instance.IsReady(); known && ready {
 				status.ReadyReplicas++
@@ -334,6 +343,26 @@ func (r *Reconciler) observeInstances(
 			if matches, known := instance.PodMatchesPodTemplate(); known && matches {
 				status.UpdatedReplicas++
 			}
+			if autogrow {
+				// Store desired pgData volume size for each instance Pod.
+				// The 'suggested-pgdata-pvc-size' annotation value is stored in the
+				// PostgresCluster status so that 1) it is available to the function
+				// 'reconcilePostgresDataVolume' and 2) the value persists after Pod
+				// restart and cluster shutdown events.
+				for _, pod := range instance.Pods {
+					// don't set an empty status
+					if pod.Annotations["suggested-pgdata-pvc-size"] != "" {
+						status.DesiredPGDataVolume[instance.Name] = pod.Annotations["suggested-pgdata-pvc-size"]
+					}
+				}
+			}
+		}
+
+		// If autogrow is enabled, get the desired volume size for each instance.
+		if autogrow {
+			for _, instance := range observed.bySet[name] {
+				status.DesiredPGDataVolume[instance.Name] = r.storeDesiredRequest(ctx, cluster,
+					name, status.DesiredPGDataVolume[instance.Name], previousDesiredRequests[instance.Name])
+			}
 		}

 		cluster.Status.InstanceSets = append(cluster.Status.InstanceSets, status)
@@ -342,6 +371,67 @@ func (r *Reconciler) observeInstances(
 	return observed, err
 }

+// storeDesiredRequest saves the appropriate request value to the PostgresCluster
+// status. If the value has grown, create an Event.
+func (r *Reconciler) storeDesiredRequest(
+	ctx context.Context, cluster *v1beta1.PostgresCluster,
+	instanceSetName, desiredRequest, desiredRequestBackup string,
+) string {
+	var current resource.Quantity
+	var previous resource.Quantity
+	var err error
+	log := logging.FromContext(ctx)
+
+	// Parse the desired request from the cluster's status.
+	if desiredRequest != "" {
+		current, err = resource.ParseQuantity(desiredRequest)
+		if err != nil {
+			log.Error(err, "Unable to parse pgData volume request from status ("+
+				desiredRequest+") for "+cluster.Name+"/"+instanceSetName)
+			// If there was an error parsing the value, treat as unset (equivalent to zero).
+			desiredRequest = ""
+			current, _ = resource.ParseQuantity("")
+		}
+	}
+
+	// Parse the desired request from the status backup.
+	if desiredRequestBackup != "" {
+		previous, err = resource.ParseQuantity(desiredRequestBackup)
+		if err != nil {
+			log.Error(err, "Unable to parse pgData volume request from status backup ("+
+				desiredRequestBackup+") for "+cluster.Name+"/"+instanceSetName)
+			// If there was an error parsing the value, treat as unset (equivalent to zero).
+			desiredRequestBackup = ""
+			previous, _ = resource.ParseQuantity("")
+		}
+	}
+
+	// Determine if the limit is set for this instance set.
+	var limitSet bool
+	for _, specInstance := range cluster.Spec.InstanceSets {
+		if specInstance.Name == instanceSetName {
+			limitSet = !specInstance.DataVolumeClaimSpec.Resources.Limits.Storage().IsZero()
+		}
+	}
+
+	if limitSet && current.Value() > previous.Value() {
+		r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "VolumeAutoGrow",
+			"pgData volume expansion to %v requested for %s/%s.",
+			current.String(), cluster.Name, instanceSetName)
+	}
+
+	// If the desired size was not observed, update with previously stored value.
+	// This can happen in scenarios where the annotation on the Pod is missing
+	// such as when the cluster is shutdown or a Pod is in the middle of a restart.
+	if desiredRequest == "" {
+		desiredRequest = desiredRequestBackup
+	}
+
+	return desiredRequest
+}
+
 // +kubebuilder:rbac:groups="",resources="pods",verbs={list}
 // +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={patch}

@@ -402,7 +492,7 @@ func (r *Reconciler) deleteInstances(
 	// mistake that something else is deleting objects. Use RequeueAfter to
 	// avoid being rate-limited due to a deluge of delete events.
 	if err != nil {
-		result.RequeueAfter = 10 * time.Second
+		result = runtime.RequeueWithoutBackoff(10 * time.Second)
 	}
 	return client.IgnoreNotFound(err)
 }
@@ -503,6 +593,7 @@ func (r *Reconciler) reconcileInstanceSets(
 	primaryCertificate *corev1.SecretProjection,
 	clusterVolumes []corev1.PersistentVolumeClaim,
 	exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap,
+	backupsSpecFound bool,
 ) error {

 	// Go through the observed instances and check if a primary has been determined.
@@ -539,7 +630,9 @@ func (r *Reconciler) reconcileInstanceSets(
 			rootCA, clusterPodService, instanceServiceAccount,
 			patroniLeaderService, primaryCertificate,
 			findAvailableInstanceNames(*set, instances, clusterVolumes),
-			numInstancePods, clusterVolumes, exporterQueriesConfig, exporterWebConfig)
+			numInstancePods, clusterVolumes, exporterQueriesConfig, exporterWebConfig,
+			backupsSpecFound,
+		)

 		if err == nil {
 			err = r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, set)
@@ -591,7 +684,7 @@ func (r *Reconciler) cleanupPodDisruptionBudgets(
 	}

 	if err == nil {
-		setNames := sets.String{}
+		setNames := sets.Set[string]{}
 		for _, set := range cluster.Spec.InstanceSets {
 			setNames.Insert(set.Name)
 		}
@@ -692,7 +785,7 @@ func (r *Reconciler) rolloutInstance(
 	pod := instance.Pods[0]
 	exec := func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer,
 		command ...string) error {
-		return r.PodExec(pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...)
+		return r.PodExec(ctx, pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...)
 	}

 	primary, known := instance.IsPrimary()
@@ -978,6 +1071,7 @@ func (r *Reconciler) scaleUpInstances(
 	numInstancePods int,
 	clusterVolumes []corev1.PersistentVolumeClaim,
 	exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap,
+	backupsSpecFound bool,
 ) ([]*appsv1.StatefulSet, error) {
 	log := logging.FromContext(ctx)

@@ -1022,6 +1116,7 @@ func (r *Reconciler) scaleUpInstances(
 			rootCA, clusterPodService, instanceServiceAccount,
 			patroniLeaderService, primaryCertificate, instances[i],
 			numInstancePods, clusterVolumes, exporterQueriesConfig, exporterWebConfig,
+			backupsSpecFound,
 		)
 	}
 	if err == nil {
@@ -1051,6 +1146,7 @@ func (r *Reconciler) reconcileInstance(
 	numInstancePods int,
 	clusterVolumes []corev1.PersistentVolumeClaim,
 	exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap,
+	backupsSpecFound bool,
 ) error {
 	log := logging.FromContext(ctx).WithValues("instance", instance.Name)
 	ctx = logging.NewContext(ctx, log)
@@ -1082,7 +1178,7 @@ func (r *Reconciler) reconcileInstance(
 			ctx, cluster, spec, instance, rootCA)
 	}
 	if err == nil {
-		postgresDataVolume, err = r.reconcilePostgresDataVolume(ctx, cluster, spec, instance, clusterVolumes)
+		postgresDataVolume, err = r.reconcilePostgresDataVolume(ctx, cluster, spec, instance, clusterVolumes, nil)
 	}
 	if err == nil {
 		postgresWALVolume, err = r.reconcilePostgresWALVolume(ctx, cluster, spec, instance, observed, clusterVolumes)
@@ -1097,8 +1193,10 @@ func (r *Reconciler) reconcileInstance(
 			postgresDataVolume, postgresWALVolume, tablespaceVolumes,
 			&instance.Spec.Template.Spec)

-		addPGBackRestToInstancePodSpec(
-			cluster, instanceCertificates, &instance.Spec.Template.Spec)
+		if backupsSpecFound {
+			addPGBackRestToInstancePodSpec(
+				ctx, cluster, instanceCertificates, &instance.Spec.Template.Spec)
+		}

 		err = patroni.InstancePod(
 			ctx, cluster, clusterConfigMap, clusterPodService, patroniLeaderService,
@@ -1107,7 +1205,7 @@ func (r *Reconciler) reconcileInstance(

 	// Add pgMonitor resources to the instance Pod spec
 	if err == nil {
-		err = addPGMonitorToInstancePodSpec(cluster, &instance.Spec.Template, exporterQueriesConfig, exporterWebConfig)
+		err = addPGMonitorToInstancePodSpec(ctx, cluster, &instance.Spec.Template, exporterQueriesConfig, exporterWebConfig)
 	}

 	// add nss_wrapper init container and add nss_wrapper env vars to the database and pgbackrest
@@ -1200,15 +1298,11 @@ func generateInstanceStatefulSetIntent(_ context.Context,
 	sts.Spec.Template.Spec.Affinity = spec.Affinity
 	sts.Spec.Template.Spec.Tolerations = spec.Tolerations
 	sts.Spec.Template.Spec.TopologySpreadConstraints = spec.TopologySpreadConstraints
-	if spec.PriorityClassName != nil {
-		sts.Spec.Template.Spec.PriorityClassName = *spec.PriorityClassName
-	}
+	sts.Spec.Template.Spec.PriorityClassName = initialize.FromPointer(spec.PriorityClassName)

 	// if default pod scheduling is not explicitly disabled, add the default
 	// pod topology spread constraints
-	if cluster.Spec.DisableDefaultPodScheduling == nil ||
-		(cluster.Spec.DisableDefaultPodScheduling != nil &&
-			!*cluster.Spec.DisableDefaultPodScheduling) {
+	if !initialize.FromPointer(cluster.Spec.DisableDefaultPodScheduling) {
 		sts.Spec.Template.Spec.TopologySpreadConstraints = append(
 			sts.Spec.Template.Spec.TopologySpreadConstraints,
 			defaultTopologySpreadConstraints(
@@ -1271,13 +1365,12 @@ func generateInstanceStatefulSetIntent(_ context.Context,

 // addPGBackRestToInstancePodSpec adds pgBackRest configurations and sidecars
 // to the PodSpec.
-func addPGBackRestToInstancePodSpec(cluster *v1beta1.PostgresCluster,
+func addPGBackRestToInstancePodSpec(
+	ctx context.Context, cluster *v1beta1.PostgresCluster,
 	instanceCertificates *corev1.Secret, instancePod *corev1.PodSpec,
 ) {
-	if pgbackrest.DedicatedRepoHostEnabled(cluster) {
-		pgbackrest.AddServerToInstancePod(cluster, instancePod,
-			instanceCertificates.Name)
-	}
+	pgbackrest.AddServerToInstancePod(ctx, cluster, instancePod,
+		instanceCertificates.Name)

 	pgbackrest.AddConfigToInstancePod(cluster, instancePod)
 }
diff --git a/internal/controller/postgrescluster/instance.md b/internal/controller/postgrescluster/instance.md
index 48870828fd..f0de4c5d7a 100644
--- a/internal/controller/postgrescluster/instance.md
+++ b/internal/controller/postgrescluster/instance.md
@@ -1,16 +1,7 @@
 ## Shutdown and Startup Logic Detail
@@ -69,7 +60,7 @@ instance name or set to blank ("")
 ### Logic Map

 With this, the grid below shows the expected replica count value, depending on
-the the values. Below, the letters represent the following:
+the values. Below, the letters represent the following:

 M = StartupInstance matches the instance name
diff --git a/internal/controller/postgrescluster/instance_rollout_test.go b/internal/controller/postgrescluster/instance_rollout_test.go
index 97bcc2f0ab..e668907497 100644
--- a/internal/controller/postgrescluster/instance_rollout_test.go
+++ b/internal/controller/postgrescluster/instance_rollout_test.go
@@ -1,17 +1,6 @@
-/*
- Copyright 2021 - 2023 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0

 package postgrescluster

@@ -75,7 +64,7 @@ func TestReconcilerRolloutInstance(t *testing.T) {
 		execCalls := 0
 		reconciler.PodExec = func(
-			namespace, pod, container string, stdin io.Reader, _, _ io.Writer, command ...string,
+			ctx context.Context, namespace, pod, container string, stdin io.Reader, _, _ io.Writer, command ...string,
 		) error {
 			execCalls++
@@ -134,7 +123,7 @@ func TestReconcilerRolloutInstance(t *testing.T) {
 		reconciler := &Reconciler{}
 		reconciler.Tracer = otel.Tracer(t.Name())
 		reconciler.PodExec = func(
-			namespace, pod, container string, _ io.Reader, stdout, _ io.Writer, command ...string,
+			ctx context.Context, namespace, pod, container string, _ io.Reader, stdout, _ io.Writer, command ...string,
 		) error {
 			execCalls++
@@ -162,7 +151,7 @@ func TestReconcilerRolloutInstance(t *testing.T) {
 		reconciler := &Reconciler{}
 		reconciler.Tracer = otel.Tracer(t.Name())
 		reconciler.PodExec = func(
-			_, _, _ string, _ io.Reader, _, _ io.Writer, _ ...string,
+			ctx context.Context, _, _, _ string, _ io.Reader, _, _ io.Writer, _ ...string,
 		) error {
 			// Nothing useful in stdout.
 			return nil
diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go
index ec393df38f..f7f59f50a5 100644
--- a/internal/controller/postgrescluster/instance_test.go
+++ b/internal/controller/postgrescluster/instance_test.go
@@ -1,20 +1,6 @@
-//go:build envtest
-// +build envtest
-
-/*
- Copyright 2021 - 2023 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0

 package postgrescluster

@@ -27,6 +13,7 @@ import (
 	"testing"
 	"time"

+	"github.com/go-logr/logr/funcr"
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/pkg/errors"
 	"go.opentelemetry.io/otel"
@@ -41,15 +28,19 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/record"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"

+	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
 	"github.com/crunchydata/postgres-operator/internal/initialize"
+	"github.com/crunchydata/postgres-operator/internal/logging"
 	"github.com/crunchydata/postgres-operator/internal/naming"
+	"github.com/crunchydata/postgres-operator/internal/testing/cmp"
+	"github.com/crunchydata/postgres-operator/internal/testing/events"
 	"github.com/crunchydata/postgres-operator/internal/testing/require"
-	"github.com/crunchydata/postgres-operator/internal/util"
 	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )

@@ -187,7 +178,7 @@ func TestNewObservedInstances(t *testing.T) {
 		// Lookup based on its labels.
 		assert.Equal(t, observed.byName["the-name"], instance)
 		assert.DeepEqual(t, observed.bySet["missing"], []*Instance{instance})
-		assert.DeepEqual(t, observed.setNames.List(), []string{"missing"})
+		assert.DeepEqual(t, sets.List(observed.setNames), []string{"missing"})
 	})

 	t.Run("RunnerMissingOthers", func(t *testing.T) {
@@ -220,7 +211,7 @@ func TestNewObservedInstances(t *testing.T) {
 		// Lookup based on its name and labels.
 		assert.Equal(t, observed.byName["the-name"], instance)
 		assert.DeepEqual(t, observed.bySet["missing"], []*Instance{instance})
-		assert.DeepEqual(t, observed.setNames.List(), []string{"missing"})
+		assert.DeepEqual(t, sets.List(observed.setNames), []string{"missing"})
 	})

 	t.Run("Matching", func(t *testing.T) {
@@ -265,7 +256,122 @@ func TestNewObservedInstances(t *testing.T) {
 		// Lookup based on its name and labels.
assert.Equal(t, observed.byName["the-name"], instance) assert.DeepEqual(t, observed.bySet["00"], []*Instance{instance}) - assert.DeepEqual(t, observed.setNames.List(), []string{"00"}) + assert.DeepEqual(t, sets.List(observed.setNames), []string{"00"}) + }) +} + +func TestStoreDesiredRequest(t *testing.T) { + ctx := context.Background() + + setupLogCapture := func(ctx context.Context) (context.Context, *[]string) { + calls := []string{} + testlog := funcr.NewJSON(func(object string) { + calls = append(calls, object) + }, funcr.Options{ + Verbosity: 1, + }) + return logging.NewContext(ctx, testlog), &calls + } + + cluster := v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rhino", + Namespace: "test-namespace", + }, + Spec: v1beta1.PostgresClusterSpec{ + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + Name: "red", + Replicas: initialize.Int32(1), + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }}}, + }, { + Name: "blue", + Replicas: initialize.Int32(1), + }}}} + + t.Run("BadRequestNoBackup", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "red", "woot", "") + + assert.Equal(t, value, "") + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 1) + assert.Assert(t, cmp.Contains((*logs)[0], "Unable to parse pgData volume request from status")) + }) + + t.Run("BadRequestWithBackup", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "red", "foo", "1Gi") + + assert.Equal(t, value, "1Gi") + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 1) + assert.Assert(t, cmp.Contains((*logs)[0], "Unable to parse pgData volume request from status (foo) for rhino/red")) + }) + + t.Run("NoLimitNoEvent", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "blue", "1Gi", "") + + assert.Equal(t, value, "1Gi") + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 0) + }) + + t.Run("BadBackupRequest", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "red", "2Gi", "bar") + + assert.Equal(t, value, "2Gi") + assert.Equal(t, len(*logs), 1) + assert.Assert(t, cmp.Contains((*logs)[0], "Unable to parse pgData volume request from status backup (bar) for rhino/red")) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "VolumeAutoGrow") + assert.Equal(t, recorder.Events[0].Note, "pgData volume expansion to 2Gi requested for rhino/red.") + }) + + t.Run("ValueUpdateWithEvent", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := 
reconciler.storeDesiredRequest(ctx, &cluster, "red", "1Gi", "") + + assert.Equal(t, value, "1Gi") + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "VolumeAutoGrow") + assert.Equal(t, recorder.Events[0].Note, "pgData volume expansion to 1Gi requested for rhino/red.") + }) + + t.Run("NoLimitNoEvent", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "blue", "1Gi", "") + + assert.Equal(t, value, "1Gi") + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 0) }) } @@ -418,8 +524,9 @@ func TestWritablePod(t *testing.T) { } func TestAddPGBackRestToInstancePodSpec(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.TablespaceVolumes+"=false"))) + t.Parallel() + ctx := context.Background() cluster := v1beta1.PostgresCluster{} cluster.Name = "hippo" cluster.Default() @@ -444,14 +551,14 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { cluster.Spec.Backups.PGBackRest.Repos = nil out := pod.DeepCopy() - addPGBackRestToInstancePodSpec(cluster, &certificates, out) + addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) // Only Containers and Volumes fields have changed. assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) // Only database container has mounts. // Other containers are ignored. - assert.Assert(t, marshalMatches(out.Containers, ` + assert.Assert(t, cmp.MarshalMatches(out.Containers, ` - name: database resources: {} volumeMounts: @@ -460,14 +567,104 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { readOnly: true - name: other resources: {} +- command: + - pgbackrest + - server + livenessProbe: + exec: + command: + - pgbackrest + - server-ping + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + - mountPath: /pgdata + name: postgres-data + - mountPath: /pgwal + name: postgres-wal + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true +- command: + - bash + - -ceu + - -- + - |- + monitor() { + exec {fd}<> <(:||:) + until read -r -t 5 -u "${fd}"; do + if + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --dereference --format='Loaded configuration dated %y' "${filename}" + elif + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] + } && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded certificates dated %y' "${directory}" + fi + done + }; export directory="$1" authority="$2" filename="$3"; export -f monitor; exec -a "$0" bash -ceu monitor + - pgbackrest-config + - /etc/pgbackrest/server + - /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt + - /etc/pgbackrest/conf.d/~postgres-operator_server.conf + name: pgbackrest-config + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + 
runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true `)) - // Instance configuration files but no certificates. + // Instance configuration files with certificates. // Other volumes are ignored. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: other - name: postgres-data - name: postgres-wal +- name: pgbackrest-server + projected: + sources: + - secret: + items: + - key: pgbackrest-server.crt + path: server-tls.crt + - key: pgbackrest-server.key + mode: 384 + path: server-tls.key + name: some-secret - name: pgbackrest-config projected: sources: @@ -477,7 +674,19 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { path: pgbackrest_instance.conf - key: config-hash path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest `)) }) @@ -489,7 +698,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { // Instance configuration files plus client and server certificates. // The server certificate comes from the instance Secret. // Other volumes are untouched. - assert.Assert(t, marshalMatches(result.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(result.Volumes, ` - name: other - name: postgres-data - name: postgres-wal @@ -526,7 +735,6 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { mode: 384 path: ~postgres-operator/client-tls.key name: hippo-pgbackrest - optional: true `)) } @@ -539,12 +747,12 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { } out := pod.DeepCopy() - addPGBackRestToInstancePodSpec(cluster, &certificates, out) + addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) alwaysExpect(t, out) // The TLS server is added and configuration mounted. // It has PostgreSQL volumes mounted while other volumes are ignored. 
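
The `mode: 384` entries in the expected volumes above are decimal file modes: 384 is 0o600, owner read/write only, applied to the TLS private keys. A small sketch of building such a projected Secret source with the Kubernetes API; `int32Ptr` is a hypothetical stand-in for the repo's `initialize.Int32` helper, and the secret/key names are the ones asserted in the test:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// int32Ptr stands in for a pointer helper such as initialize.Int32.
func int32Ptr(v int32) *int32 { return &v }

func main() {
	// KeyToPath.Mode is a decimal *int32, so 0o600 serializes as 384
	// in the expected YAML above.
	vol := corev1.Volume{
		Name: "pgbackrest-server",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					Secret: &corev1.SecretProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "some-secret"},
						Items: []corev1.KeyToPath{
							{Key: "pgbackrest-server.crt", Path: "server-tls.crt"},
							{Key: "pgbackrest-server.key", Path: "server-tls.key", Mode: int32Ptr(0o600)},
						},
					},
				}},
			},
		},
	}
	fmt.Println(*vol.VolumeSource.Projected.Sources[0].Secret.Items[1].Mode) // 384
}
```
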
- assert.Assert(t, marshalMatches(out.Containers, ` + assert.Assert(t, cmp.MarshalMatches(out.Containers, ` - name: database resources: {} volumeMounts: @@ -571,6 +779,8 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbackrest/server name: pgbackrest-server @@ -588,21 +798,21 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { - -- - |- monitor() { - exec {fd}<> <(:) + exec {fd}<> <(:||:) until read -r -t 5 -u "${fd}"; do if - [ "${filename}" -nt "/proc/self/fd/${fd}" ] && + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --dereference --format='Loaded configuration dated %y' "${filename}" elif - { [ "${directory}" -nt "/proc/self/fd/${fd}" ] || - [ "${authority}" -nt "/proc/self/fd/${fd}" ] + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] } && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi done @@ -621,6 +831,8 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbackrest/server name: pgbackrest-server @@ -647,7 +859,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { before := out.DeepCopy() out := pod.DeepCopy() - addPGBackRestToInstancePodSpec(cluster, &certificates, out) + addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) alwaysExpect(t, out) // Only the TLS server container changed. @@ -656,7 +868,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { assert.DeepEqual(t, before.Containers[:2], out.Containers[:2]) // It has the custom resources. 
- assert.Assert(t, marshalMatches(out.Containers[2:], ` + assert.Assert(t, cmp.MarshalMatches(out.Containers[2:], ` - command: - pgbackrest - server @@ -679,6 +891,8 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbackrest/server name: pgbackrest-server @@ -696,21 +910,21 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { - -- - |- monitor() { - exec {fd}<> <(:) + exec {fd}<> <(:||:) until read -r -t 5 -u "${fd}"; do if - [ "${filename}" -nt "/proc/self/fd/${fd}" ] && + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --dereference --format='Loaded configuration dated %y' "${filename}" elif - { [ "${directory}" -nt "/proc/self/fd/${fd}" ] || - [ "${authority}" -nt "/proc/self/fd/${fd}" ] + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] } && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi done @@ -729,6 +943,8 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbackrest/server name: pgbackrest-server @@ -1127,9 +1343,6 @@ func TestDeleteInstance(t *testing.T) { Tracer: otel.Tracer(t.Name()), } - // Initialize the feature gate - assert.NilError(t, util.AddAndSetFeatureGates("")) - // Define, Create, and Reconcile a cluster to get an instance running in kube cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name @@ -1180,8 +1393,9 @@ func TestDeleteInstance(t *testing.T) { for _, gvk := range gvks { t.Run(gvk.Kind, func(t *testing.T) { - uList := &unstructured.UnstructuredList{} - err := wait.Poll(time.Second*3, Scale(time.Second*30), func() (bool, error) { + ctx := context.Background() + err := wait.PollUntilContextTimeout(ctx, time.Second*3, Scale(time.Second*30), false, func(ctx context.Context) (bool, error) { + uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) assert.NilError(t, errors.WithStack(reconciler.Client.List(ctx, uList, client.InNamespace(cluster.Namespace), @@ -1351,7 +1565,7 @@ func TestGenerateInstanceStatefulSetIntent(t *testing.T) { name: "check default scheduling constraints are added", run: func(t *testing.T, ss *appsv1.StatefulSet) { assert.Equal(t, len(ss.Spec.Template.Spec.TopologySpreadConstraints), 2) - assert.Assert(t, marshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, ` + assert.Assert(t, cmp.MarshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, ` - labelSelector: matchExpressions: - key: postgres-operator.crunchydata.com/data @@ -1398,7 +1612,7 @@ func TestGenerateInstanceStatefulSetIntent(t *testing.T) { }, run: func(t *testing.T, ss *appsv1.StatefulSet) { assert.Equal(t, len(ss.Spec.Template.Spec.TopologySpreadConstraints), 3) - assert.Assert(t, marshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, ` + assert.Assert(t, cmp.MarshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, ` - labelSelector: matchExpressions: - key: postgres-operator.crunchydata.com/cluster @@ -1481,7 +1695,7 @@ func TestGenerateInstanceStatefulSetIntent(t *testing.T) { }, run: func(t *testing.T, ss 
*appsv1.StatefulSet) { assert.Equal(t, len(ss.Spec.Template.Spec.TopologySpreadConstraints), 1) - assert.Assert(t, marshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, + assert.Assert(t, cmp.MarshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, `- labelSelector: matchExpressions: - key: postgres-operator.crunchydata.com/cluster @@ -1758,7 +1972,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name spec := &cluster.Spec.InstanceSets[0] - spec.MinAvailable = initialize.IntOrStringInt32(0) + spec.MinAvailable = initialize.Pointer(intstr.FromInt32(0)) assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) assert.Assert(t, !foundPDB(cluster, spec)) }) @@ -1767,7 +1981,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name spec := &cluster.Spec.InstanceSets[0] - spec.MinAvailable = initialize.IntOrStringInt32(1) + spec.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) @@ -1776,7 +1990,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { assert.Assert(t, foundPDB(cluster, spec)) t.Run("deleted", func(t *testing.T) { - spec.MinAvailable = initialize.IntOrStringInt32(0) + spec.MinAvailable = initialize.Pointer(intstr.FromInt32(0)) err := r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec) if apierrors.IsConflict(err) { // When running in an existing environment another controller will sometimes update @@ -1794,7 +2008,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name spec := &cluster.Spec.InstanceSets[0] - spec.MinAvailable = initialize.IntOrStringString("50%") + spec.MinAvailable = initialize.Pointer(intstr.FromString("50%")) assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) @@ -1803,7 +2017,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { assert.Assert(t, foundPDB(cluster, spec)) t.Run("deleted", func(t *testing.T) { - spec.MinAvailable = initialize.IntOrStringString("0%") + spec.MinAvailable = initialize.Pointer(intstr.FromString("0%")) err := r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec) if apierrors.IsConflict(err) { // When running in an existing environment another controller will sometimes update @@ -1817,13 +2031,13 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { }) t.Run("delete with 00%", func(t *testing.T) { - spec.MinAvailable = initialize.IntOrStringString("50%") + spec.MinAvailable = initialize.Pointer(intstr.FromString("50%")) assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) assert.Assert(t, foundPDB(cluster, spec)) t.Run("deleted", func(t *testing.T) { - spec.MinAvailable = initialize.IntOrStringString("00%") + spec.MinAvailable = initialize.Pointer(intstr.FromString("00%")) err := r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec) if apierrors.IsConflict(err) { // When running in an existing environment another controller will sometimes update @@ -1896,13 +2110,13 @@ func TestCleanupDisruptionBudgets(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name spec := &cluster.Spec.InstanceSets[0] - spec.MinAvailable = initialize.IntOrStringInt32(1) + spec.MinAvailable = 
initialize.Pointer(intstr.FromInt32(1)) assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) expectedPDB := generatePDB(t, cluster, spec, - initialize.IntOrStringInt32(1)) + initialize.Pointer(intstr.FromInt32(1))) assert.NilError(t, createPDB(expectedPDB)) t.Run("no instances were removed", func(t *testing.T) { @@ -1915,7 +2129,7 @@ func TestCleanupDisruptionBudgets(t *testing.T) { leftoverPDB := generatePDB(t, cluster, &v1beta1.PostgresInstanceSetSpec{ Name: "old-instance", Replicas: initialize.Int32(1), - }, initialize.IntOrStringInt32(1)) + }, initialize.Pointer(intstr.FromInt32(1))) assert.NilError(t, createPDB(leftoverPDB)) assert.Assert(t, foundPDB(expectedPDB)) diff --git a/internal/controller/postgrescluster/olm_registration.go b/internal/controller/postgrescluster/olm_registration.go deleted file mode 100644 index 03d250fecb..0000000000 --- a/internal/controller/postgrescluster/olm_registration.go +++ /dev/null @@ -1,68 +0,0 @@ -package postgrescluster - -import ( - "golang.org/x/mod/semver" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/crunchydata/postgres-operator/internal/config" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -func emitAdvanceWarning(cluster *v1beta1.PostgresCluster, r *Reconciler) { - advanceWarning := "Crunchy Postgres for Kubernetes now requires registration for " + - "operator upgrades. Register now to be ready for your next upgrade. See " + - r.RegistrationURL + " for details." - r.Recorder.Event(cluster, corev1.EventTypeWarning, "Register Soon", advanceWarning) -} - -func emitEncumbranceWarning(cluster *v1beta1.PostgresCluster, r *Reconciler) { - encumbranceWarning := "Registration required for Crunchy Postgres for Kubernetes to modify " + - cluster.Name + ". See " + r.RegistrationURL + " for details." - r.Recorder.Event(cluster, corev1.EventTypeWarning, "Registration Required", encumbranceWarning) - addTokenRequiredCondition(cluster) -} - -func registrationRequiredStatusFound(cluster *v1beta1.PostgresCluster) bool { - return cluster.Status.RegistrationRequired != nil -} - -func tokenRequiredConditionFound(cluster *v1beta1.PostgresCluster) bool { - for _, c := range cluster.Status.Conditions { - if c.Type == v1beta1.TokenRequired { - return true - } - } - - return false -} - -func addTokenRequiredCondition(cluster *v1beta1.PostgresCluster) { - meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ - Type: v1beta1.TokenRequired, - Status: metav1.ConditionTrue, - Reason: "TokenRequired", - Message: "Reconciliation suspended", - ObservedGeneration: cluster.GetGeneration(), - }) -} - -func addRegistrationRequiredStatus(cluster *v1beta1.PostgresCluster, pgoVersion string) { - cluster.Status.RegistrationRequired = &v1beta1.RegistrationRequirementStatus{ - PGOVersion: pgoVersion, - } -} - -func shouldEncumberReconciliation(validToken bool, cluster *v1beta1.PostgresCluster, pgoVersion string) bool { - if validToken { - return false - } - - // Get the CPK version that first imposed RegistrationRequired status on this cluster. 
- trialStartedWith := config.RegistrationRequiredBy(cluster) - currentPGOVersion := pgoVersion - startedLessThanCurrent := semver.Compare(trialStartedWith, currentPGOVersion) == -1 - - return startedLessThanCurrent -} diff --git a/internal/controller/postgrescluster/patroni.go b/internal/controller/postgrescluster/patroni.go index 9df385cfae..1c5ac93eed 100644 --- a/internal/controller/postgrescluster/patroni.go +++ b/internal/controller/postgrescluster/patroni.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -26,7 +15,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" @@ -103,7 +91,7 @@ func (r *Reconciler) handlePatroniRestarts( ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { pod := primaryNeedsRestart.Pods[0] - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) }) return errors.WithStack(exec.RestartPendingMembers(ctx, "master", naming.PatroniScope(cluster))) @@ -128,7 +116,7 @@ func (r *Reconciler) handlePatroniRestarts( ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { pod := replicaNeedsRestart.Pods[0] - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) }) return errors.WithStack(exec.RestartPendingMembers(ctx, "replica", naming.PatroniScope(cluster))) @@ -212,8 +200,8 @@ func (r *Reconciler) reconcilePatroniDynamicConfiguration( // NOTE(cbandy): Despite the guards above, calling PodExec may still fail // due to a missing or stopped container. - exec := func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + exec := func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { + return r.PodExec(ctx, pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) 
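
Each of the Patroni paths above wraps `r.PodExec` in a closure that pins the target namespace, pod, and container while forwarding the caller's context. The adapter pattern, distilled; the type names (`podExec`, `executor`) and the sample command are illustrative, not the operator's exports:

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
)

// podExec stands in for Reconciler.PodExec; executor matches the callback
// shape the Patroni helpers accept in this diff.
type podExec func(ctx context.Context, namespace, pod, container string,
	stdin io.Reader, stdout, stderr io.Writer, command ...string) error

type executor func(ctx context.Context, stdin io.Reader,
	stdout, stderr io.Writer, command ...string) error

// bindPod fixes the exec target and forwards ctx so cancellation
// reaches the underlying call.
func bindPod(exec podExec, namespace, pod, container string) executor {
	return func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error {
		return exec(ctx, namespace, pod, container, stdin, stdout, stderr, command...)
	}
}

func main() {
	fake := podExec(func(ctx context.Context, namespace, pod, container string,
		stdin io.Reader, stdout, stderr io.Writer, command ...string) error {
		fmt.Fprintf(stdout, "%s/%s[%s]: %v", namespace, pod, container, command)
		return nil
	})

	var out bytes.Buffer
	run := bindPod(fake, "ns", "hippo-instance-0", "database")
	_ = run(context.Background(), nil, &out, nil, "patronictl", "restart", "--pending")
	fmt.Println(out.String())
}
```
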
} var configuration map[string]any @@ -286,6 +274,8 @@ func (r *Reconciler) generatePatroniLeaderLeaseService( } servicePort.NodePort = *spec.NodePort } + service.Spec.ExternalTrafficPolicy = initialize.FromPointer(spec.ExternalTrafficPolicy) + service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy } service.Spec.Ports = []corev1.ServicePort{servicePort} @@ -318,8 +308,8 @@ func (r *Reconciler) reconcilePatroniLeaderLease( func (r *Reconciler) reconcilePatroniStatus( ctx context.Context, cluster *v1beta1.PostgresCluster, observedInstances *observedInstances, -) (reconcile.Result, error) { - result := reconcile.Result{} +) (time.Duration, error) { + var requeue time.Duration log := logging.FromContext(ctx) var readyInstance bool @@ -346,12 +336,11 @@ func (r *Reconciler) reconcilePatroniStatus( // is detected in the cluster we assume this is the case, and simply log a message and // requeue in order to try again until the expected value is found. log.Info("detected ready instance but no initialize value") - result.RequeueAfter = 1 * time.Second - return result, nil + requeue = time.Second } } - return result, err + return requeue, err } // reconcileReplicationSecret creates a secret containing the TLS @@ -535,7 +524,7 @@ func (r *Reconciler) reconcilePatroniSwitchover(ctx context.Context, } exec := func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(runningPod.Namespace, runningPod.Name, naming.ContainerDatabase, stdin, + return r.PodExec(ctx, runningPod.Namespace, runningPod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) } diff --git a/internal/controller/postgrescluster/patroni_test.go b/internal/controller/postgrescluster/patroni_test.go index 7420144bca..b2a457685b 100644 --- a/internal/controller/postgrescluster/patroni_test.go +++ b/internal/controller/postgrescluster/patroni_test.go @@ -1,23 +1,9 @@ -//go:build envtest -// +build envtest +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - import ( "context" "fmt" @@ -37,10 +23,10 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -60,11 +46,11 @@ func TestGeneratePatroniLeaderLeaseService(t *testing.T) { cluster.Spec.Port = initialize.Int32(9876) alwaysExpect := func(t testing.TB, service *corev1.Service) { - assert.Assert(t, marshalMatches(service.TypeMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: pg2 @@ -92,7 +78,7 @@ ownerReferences: alwaysExpect(t, service) // Defaults to ClusterIP. assert.Equal(t, service.Spec.Type, corev1.ServiceTypeClusterIP) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: postgres port: 9876 protocol: TCP @@ -181,7 +167,7 @@ ownerReferences: assert.NilError(t, err) alwaysExpect(t, service) test.Expect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: postgres port: 9876 protocol: TCP @@ -206,7 +192,7 @@ ownerReferences: assert.NilError(t, err) alwaysExpect(t, service) assert.Equal(t, service.Spec.Type, corev1.ServiceTypeNodePort) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: postgres nodePort: 32001 port: 9876 @@ -219,7 +205,7 @@ ownerReferences: assert.Equal(t, service.Spec.Type, corev1.ServiceTypeLoadBalancer) assert.NilError(t, err) alwaysExpect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: postgres nodePort: 32002 port: 9876 @@ -527,13 +513,13 @@ func TestReconcilePatroniStatus(t *testing.T) { t.Run(fmt.Sprintf("%+v", tc), func(t *testing.T) { postgresCluster, observedInstances := createResources(i, tc.readyReplicas, tc.writeAnnotation) - result, err := r.reconcilePatroniStatus(ctx, postgresCluster, observedInstances) + requeue, err := r.reconcilePatroniStatus(ctx, postgresCluster, observedInstances) if tc.requeueExpected { assert.NilError(t, err) - assert.Assert(t, result.RequeueAfter == 1*time.Second) + assert.Equal(t, requeue, time.Second) } else { assert.NilError(t, err) - assert.DeepEqual(t, result, reconcile.Result{}) + assert.Equal(t, requeue, time.Duration(0)) } }) } @@ -547,7 +533,7 @@ func TestReconcilePatroniSwitchover(t *testing.T) { var timelineCallNoLeader, timelineCall bool r := Reconciler{ Client: client, - PodExec: func(namespace, pod, container string, + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { called = true switch { diff --git a/internal/controller/postgrescluster/pgadmin.go b/internal/controller/postgrescluster/pgadmin.go index 0f361c71bf..c0a936ba1f 100644 --- a/internal/controller/postgrescluster/pgadmin.go +++ 
b/internal/controller/postgrescluster/pgadmin.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -23,6 +12,7 @@ import ( "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -168,7 +158,7 @@ func (r *Reconciler) generatePGAdminService( // requires updates to the pgAdmin service configuration. servicePort := corev1.ServicePort{ Name: naming.PortPGAdmin, - Port: *initialize.Int32(5050), + Port: 5050, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromString(naming.PortPGAdmin), } @@ -191,6 +181,8 @@ func (r *Reconciler) generatePGAdminService( } servicePort.NodePort = *spec.NodePort } + service.Spec.ExternalTrafficPolicy = initialize.FromPointer(spec.ExternalTrafficPolicy) + service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy } service.Spec.Ports = []corev1.ServicePort{servicePort} @@ -291,24 +283,19 @@ func (r *Reconciler) reconcilePGAdminStatefulSet( // - https://docs.k8s.io/concepts/services-networking/dns-pod-service/#pods sts.Spec.ServiceName = naming.ClusterPodService(cluster).Name - // Set the StatefulSet update strategy to "RollingUpdate", and the Partition size for the - // update strategy to 0 (note that these are the defaults for a StatefulSet). This means - // every pod of the StatefulSet will be deleted and recreated when the Pod template changes. - // - https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#rolling-updates - // - https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#forced-rollback + // Use StatefulSet's "RollingUpdate" strategy and "Parallel" policy to roll + // out changes to pods even when not Running or not Ready. + // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#rolling-updates + // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#forced-rollback + // - https://kep.k8s.io/3541 + sts.Spec.PodManagementPolicy = appsv1.ParallelPodManagement sts.Spec.UpdateStrategy.Type = appsv1.RollingUpdateStatefulSetStrategyType - sts.Spec.UpdateStrategy.RollingUpdate = &appsv1.RollingUpdateStatefulSetStrategy{ - Partition: initialize.Int32(0), - } // Use scheduling constraints from the cluster spec. 
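
Several hunks in this diff replace explicit nil checks (for `PriorityClassName`, `ExternalTrafficPolicy`, `DisableDefaultPodScheduling`) with `initialize.FromPointer`. Assuming it is the usual generic zero-value-for-nil helper, its semantics look like this sketch, not the package's verbatim source:

```go
package main

import "fmt"

// FromPointer dereferences p, or returns T's zero value when p is nil.
// This is how optional spec fields collapse to their defaults without
// a separate nil check at every call site.
func FromPointer[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

func main() {
	var priorityClass *string // not set in the spec
	fmt.Printf("%q\n", FromPointer(priorityClass)) // ""

	policy := "Local"
	fmt.Printf("%q\n", FromPointer(&policy)) // "Local"
}
```
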
sts.Spec.Template.Spec.Affinity = cluster.Spec.UserInterface.PGAdmin.Affinity sts.Spec.Template.Spec.Tolerations = cluster.Spec.UserInterface.PGAdmin.Tolerations - - if cluster.Spec.UserInterface.PGAdmin.PriorityClassName != nil { - sts.Spec.Template.Spec.PriorityClassName = *cluster.Spec.UserInterface.PGAdmin.PriorityClassName - } - + sts.Spec.Template.Spec.PriorityClassName = + initialize.FromPointer(cluster.Spec.UserInterface.PGAdmin.PriorityClassName) sts.Spec.Template.Spec.TopologySpreadConstraints = cluster.Spec.UserInterface.PGAdmin.TopologySpreadConstraints @@ -328,6 +315,29 @@ func (r *Reconciler) reconcilePGAdminStatefulSet( // set the image pull secrets, if any exist sts.Spec.Template.Spec.ImagePullSecrets = cluster.Spec.ImagePullSecrets + // Previous versions of PGO used a StatefulSet Pod Management Policy that could leave the Pod + // in a failed state. When we see that it has the wrong policy, we will delete the StatefulSet + // and then recreate it with the correct policy, as this is not a property that can be patched. + // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by + // the StatefulSet that gets created in the next reconcile. + existing := &appsv1.StatefulSet{} + if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { + if !apierrors.IsNotFound(err) { + return err + } + } else { + if existing.Spec.PodManagementPolicy != sts.Spec.PodManagementPolicy { + // We want to delete the STS without affecting the Pods, so we set the PropagationPolicy to Orphan. + // The orphaned Pods will be claimed by the StatefulSet that will be created in the next reconcile. + uid := existing.GetUID() + version := existing.GetResourceVersion() + exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} + propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan) + + return errors.WithStack(client.IgnoreNotFound(r.Client.Delete(ctx, existing, exactly, propagate))) + } + } + if err := errors.WithStack(r.setControllerReference(cluster, sts)); err != nil { return err } @@ -432,9 +442,9 @@ func (r *Reconciler) reconcilePGAdminUsers( ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) podExecutor = func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) } } if podExecutor == nil { diff --git a/internal/controller/postgrescluster/pgadmin_test.go b/internal/controller/postgrescluster/pgadmin_test.go index 92a0a6ead4..92ec6f42f1 100644 --- a/internal/controller/postgrescluster/pgadmin_test.go +++ b/internal/controller/postgrescluster/pgadmin_test.go @@ -1,20 +1,6 @@ -//go:build envtest -// +build envtest - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -155,7 +141,7 @@ func TestGeneratePGAdminService(t *testing.T) { assert.NilError(t, err) assert.Assert(t, !specified) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null name: my-cluster-pgadmin namespace: my-ns @@ -168,11 +154,11 @@ namespace: my-ns } alwaysExpect := func(t testing.TB, service *corev1.Service) { - assert.Assert(t, marshalMatches(service.TypeMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: my-cluster @@ -266,7 +252,7 @@ ownerReferences: alwaysExpect(t, service) // Defaults to ClusterIP. assert.Equal(t, service.Spec.Type, corev1.ServiceTypeClusterIP) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgadmin port: 5050 protocol: TCP @@ -299,7 +285,7 @@ ownerReferences: assert.Assert(t, specified) alwaysExpect(t, service) test.Expect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgadmin port: 5050 protocol: TCP @@ -324,7 +310,7 @@ ownerReferences: assert.NilError(t, err) assert.Equal(t, service.Spec.Type, corev1.ServiceTypeNodePort) alwaysExpect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgadmin nodePort: 32001 port: 5050 @@ -337,7 +323,7 @@ ownerReferences: assert.NilError(t, err) assert.Equal(t, service.Spec.Type, corev1.ServiceTypeLoadBalancer) alwaysExpect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgadmin nodePort: 32002 port: 5050 @@ -701,7 +687,7 @@ func TestReconcilePGAdminDataVolume(t *testing.T) { assert.Equal(t, pvc.Labels[naming.LabelRole], naming.RolePGAdmin) assert.Equal(t, pvc.Labels[naming.LabelData], naming.DataPGAdmin) - assert.Assert(t, marshalMatches(pvc.Spec, ` + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` accessModes: - ReadWriteOnce resources: @@ -756,6 +742,10 @@ func TestReconcilePGAdminUsers(t *testing.T) { t.Run("PodTerminating", func(t *testing.T) { pod := pod.DeepCopy() + // Must add finalizer when adding deletion timestamp otherwise fake client will panic: + // https://github.com/kubernetes-sigs/controller-runtime/pull/2316 + pod.Finalizers = append(pod.Finalizers, "some-finalizer") + pod.DeletionTimestamp = new(metav1.Time) *pod.DeletionTimestamp = metav1.Now() pod.Status.ContainerStatuses = @@ -784,7 +774,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { calls := 0 r.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -862,7 +852,7 @@ func pgAdminTestCluster(ns corev1.Namespace) *v1beta1.PostgresCluster { Volume: &v1beta1.RepoPVC{ VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ + Resources: 
corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -877,7 +867,7 @@ func pgAdminTestCluster(ns corev1.Namespace) *v1beta1.PostgresCluster { Image: "test-image", DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index a0a2d9b53d..836df047fc 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1,24 +1,14 @@ -package postgrescluster - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package postgrescluster import ( "context" "fmt" "io" + "reflect" "regexp" "sort" "strings" @@ -33,7 +23,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" @@ -43,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" @@ -117,11 +107,14 @@ var regexRepoIndex = regexp.MustCompile(`\d+`) // RepoResources is used to store various resources for pgBackRest repositories and // repository hosts type RepoResources struct { + hosts []*appsv1.StatefulSet cronjobs []*batchv1.CronJob manualBackupJobs []*batchv1.Job replicaCreateBackupJobs []*batchv1.Job - hosts []*appsv1.StatefulSet pvcs []*corev1.PersistentVolumeClaim + sas []*corev1.ServiceAccount + roles []*rbacv1.Role + rolebindings []*rbacv1.RoleBinding } // applyRepoHostIntent ensures the pgBackRest repository host StatefulSet is synchronized with the @@ -134,12 +127,34 @@ func (r *Reconciler) applyRepoHostIntent(ctx context.Context, postgresCluster *v repoHostName string, repoResources *RepoResources, observedInstances *observedInstances) (*appsv1.StatefulSet, error) { - repo, err := r.generateRepoHostIntent(postgresCluster, repoHostName, repoResources, - observedInstances) + repo, err := r.generateRepoHostIntent(ctx, postgresCluster, repoHostName, repoResources, observedInstances) if err != nil { return nil, err } + // Previous versions of PGO used a StatefulSet Pod Management Policy that could leave the Pod + // in a failed state. 
When we see that it has the wrong policy, we will delete the StatefulSet
+	// and then recreate it with the correct policy, as this is not a property that can be patched.
+	// When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by
+	// the StatefulSet that gets created in the next reconcile.
+	existing := &appsv1.StatefulSet{}
+	if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(repo), existing)); err != nil {
+		if !apierrors.IsNotFound(err) {
+			return nil, err
+		}
+	} else {
+		if existing.Spec.PodManagementPolicy != repo.Spec.PodManagementPolicy {
+			// We want to delete the STS without affecting the Pods, so we set the PropagationPolicy to Orphan.
+			// The orphaned Pods will be claimed by the new StatefulSet that gets created in the next reconcile.
+			uid := existing.GetUID()
+			version := existing.GetResourceVersion()
+			exactly := client.Preconditions{UID: &uid, ResourceVersion: &version}
+			propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan)
+
+			return repo, errors.WithStack(r.Client.Delete(ctx, existing, exactly, propagate))
+		}
+	}
+
 	if err := r.apply(ctx, repo); err != nil {
 		return nil, err
 	}
@@ -170,24 +185,44 @@ func (r *Reconciler) applyRepoVolumeIntent(ctx context.Context,
 	return repo, nil
 }
 
+// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={list}
+// +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={list}
+// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={list}
+// +kubebuilder:rbac:groups="",resources="configmaps",verbs={list}
+// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={list}
+// +kubebuilder:rbac:groups="",resources="secrets",verbs={list}
+// +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs={list}
+// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="roles",verbs={list}
+// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={list}
+
 // getPGBackRestResources returns the existing pgBackRest resources that should be utilized by the
 // PostgresCluster controller during reconciliation. Any items returned are verified to be owned
 // by the PostgresCluster controller and still applicable per the current PostgresCluster spec.
-// Additionally, and resources identified that no longer correspond to any current configuration
+// Additionally, any resources identified that no longer correspond to any current configuration
 // are deleted.
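
Before the function below, here is the delete-and-orphan flow condensed into one standalone helper. The same pattern appears for both the pgAdmin and repo-host StatefulSets in this diff because `PodManagementPolicy` is immutable: the controller deletes only the StatefulSet, guarded by UID/ResourceVersion preconditions, while Orphan propagation keeps the Pods for the replacement to adopt. The helper name and package are mine; the client calls are controller-runtime's public API:

```go
package sketch

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteIfPolicyChanged removes an existing StatefulSet whose (immutable)
// PodManagementPolicy differs from the desired one, leaving its Pods behind.
// A later reconcile recreates the StatefulSet, which re-adopts those Pods.
func deleteIfPolicyChanged(ctx context.Context, c client.Client, desired *appsv1.StatefulSet) error {
	existing := &appsv1.StatefulSet{}
	if err := c.Get(ctx, client.ObjectKeyFromObject(desired), existing); err != nil {
		return client.IgnoreNotFound(err) // nothing exists yet; nothing to fix
	}
	if existing.Spec.PodManagementPolicy == desired.Spec.PodManagementPolicy {
		return nil
	}

	// Preconditions ensure we delete exactly the object we just observed.
	uid := existing.GetUID()
	version := existing.GetResourceVersion()
	exactly := client.Preconditions{UID: &uid, ResourceVersion: &version}
	propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan)

	return client.IgnoreNotFound(c.Delete(ctx, existing, exactly, propagate))
}
```
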
func (r *Reconciler) getPGBackRestResources(ctx context.Context, - postgresCluster *v1beta1.PostgresCluster) (*RepoResources, error) { + postgresCluster *v1beta1.PostgresCluster, + backupsSpecFound bool, +) (*RepoResources, error) { repoResources := &RepoResources{} gvks := []schema.GroupVersionKind{{ - Group: corev1.SchemeGroupVersion.Group, - Version: corev1.SchemeGroupVersion.Version, - Kind: "ConfigMapList", + Group: appsv1.SchemeGroupVersion.Group, + Version: appsv1.SchemeGroupVersion.Version, + Kind: "StatefulSetList", + }, { + Group: batchv1.SchemeGroupVersion.Group, + Version: batchv1.SchemeGroupVersion.Version, + Kind: "CronJobList", }, { Group: batchv1.SchemeGroupVersion.Group, Version: batchv1.SchemeGroupVersion.Version, Kind: "JobList", + }, { + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, + Kind: "ConfigMapList", }, { Group: corev1.SchemeGroupVersion.Group, Version: corev1.SchemeGroupVersion.Version, @@ -197,13 +232,17 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, Version: corev1.SchemeGroupVersion.Version, Kind: "SecretList", }, { - Group: appsv1.SchemeGroupVersion.Group, - Version: appsv1.SchemeGroupVersion.Version, - Kind: "StatefulSetList", + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, + Kind: "ServiceAccountList", }, { - Group: batchv1.SchemeGroupVersion.Group, - Version: batchv1.SchemeGroupVersion.Version, - Kind: "CronJobList", + Group: rbacv1.SchemeGroupVersion.Group, + Version: rbacv1.SchemeGroupVersion.Version, + Kind: "RoleList", + }, { + Group: rbacv1.SchemeGroupVersion.Group, + Version: rbacv1.SchemeGroupVersion.Version, + Kind: "RoleBindingList", }} selector := naming.PGBackRestSelector(postgresCluster.GetName()) @@ -219,7 +258,7 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, continue } - owned, err := r.cleanupRepoResources(ctx, postgresCluster, uList.Items) + owned, err := r.cleanupRepoResources(ctx, postgresCluster, uList.Items, backupsSpecFound) if err != nil { return nil, errors.WithStack(err) } @@ -241,8 +280,11 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, } // +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={delete} +// +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs={delete} // +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={delete} // +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={delete} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="roles",verbs={delete} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={delete} // cleanupRepoResources cleans up pgBackRest repository resources that should no longer be // reconciled by deleting them. This includes deleting repos (i.e. PersistentVolumeClaims) that @@ -250,7 +292,9 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, // pgBackRest repository host resources if a repository host is no longer configured. 
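
Before `cleanupRepoResources` picks up below, a self-contained look at the list-then-convert step `getPGBackRestResources` performs for each GroupVersionKind: list as unstructured, then convert the whole list into its typed form before sorting items into `RepoResources`. The sample object is fabricated for illustration:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// In the controller, uList.Items comes from a client.List call with
	// the pgBackRest label selector; here it is filled in by hand.
	uList := &unstructured.UnstructuredList{}
	uList.SetGroupVersionKind(schema.GroupVersionKind{
		Group: "apps", Version: "v1", Kind: "StatefulSetList",
	})
	uList.Items = []unstructured.Unstructured{{Object: map[string]any{
		"apiVersion": "apps/v1",
		"kind":       "StatefulSet",
		"metadata":   map[string]any{"name": "hippo-repo-host"},
	}}}

	// Convert the unstructured list into the typed list in one step.
	var stsList appsv1.StatefulSetList
	if err := runtime.DefaultUnstructuredConverter.
		FromUnstructured(uList.UnstructuredContent(), &stsList); err != nil {
		panic(err)
	}
	fmt.Println(stsList.Items[0].Name) // hippo-repo-host
}
```
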
func (r *Reconciler) cleanupRepoResources(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, - ownedResources []unstructured.Unstructured) ([]unstructured.Unstructured, error) { + ownedResources []unstructured.Unstructured, + backupsSpecFound bool, +) ([]unstructured.Unstructured, error) { // stores the resources that should not be deleted ownedNoDelete := []unstructured.Unstructured{} @@ -265,11 +309,17 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, // spec switch { case hasLabel(naming.LabelPGBackRestConfig): + if !backupsSpecFound { + break + } // Simply add the things we never want to delete (e.g. the pgBackRest configuration) // to the slice and do not delete ownedNoDelete = append(ownedNoDelete, owned) delete = false case hasLabel(naming.LabelPGBackRestDedicated): + if !backupsSpecFound { + break + } // Any resources from before 5.1 that relate to the previously required // SSH configuration should be deleted. // TODO(tjmoore4): This can be removed once 5.0 is EOL. @@ -277,12 +327,13 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, owned.GetName() != naming.PGBackRestSSHSecret(postgresCluster).Name { // If a dedicated repo host resource and a dedicated repo host is enabled, then // add to the slice and do not delete. - if pgbackrest.DedicatedRepoHostEnabled(postgresCluster) { - ownedNoDelete = append(ownedNoDelete, owned) - delete = false - } + ownedNoDelete = append(ownedNoDelete, owned) + delete = false } case hasLabel(naming.LabelPGBackRestRepoVolume): + if !backupsSpecFound { + break + } // If a volume (PVC) is identified for a repo that no longer exists in the // spec then delete it. Otherwise add it to the slice and continue. for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { @@ -295,6 +346,9 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, } } case hasLabel(naming.LabelPGBackRestBackup): + if !backupsSpecFound { + break + } // If a Job is identified for a repo that no longer exists in the spec then // delete it. Otherwise add it to the slice and continue. for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { @@ -304,6 +358,9 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, } } case hasLabel(naming.LabelPGBackRestCronJob): + if !backupsSpecFound { + break + } for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { if repo.Name == owned.GetLabels()[naming.LabelPGBackRestRepo] { if backupScheduleFound(repo, @@ -315,6 +372,18 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, } } case hasLabel(naming.LabelPGBackRestRestore): + if !backupsSpecFound { + break + } + + // If the restore job has the PGBackRestBackupJobCompletion annotation, it is + // used for volume snapshots and should not be deleted (volume snapshots code + // will clean it up when appropriate). + if _, ok := owned.GetAnnotations()[naming.PGBackRestBackupJobCompletion]; ok { + ownedNoDelete = append(ownedNoDelete, owned) + delete = false + } + // When a cluster is prepared for restore, the system identifier is removed from status // and the cluster is therefore no longer bootstrapped. 
Only once the restore Job is // complete will the cluster then be bootstrapped again, which means by the time we @@ -324,6 +393,12 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, ownedNoDelete = append(ownedNoDelete, owned) delete = false } + case hasLabel(naming.LabelPGBackRest): + if !backupsSpecFound { + break + } + ownedNoDelete = append(ownedNoDelete, owned) + delete = false } // If nothing has specified that the resource should not be deleted, then delete @@ -363,6 +438,24 @@ func unstructuredToRepoResources(kind string, repoResources *RepoResources, uList *unstructured.UnstructuredList) error { switch kind { + case "StatefulSetList": + var stsList appsv1.StatefulSetList + if err := runtime.DefaultUnstructuredConverter. + FromUnstructured(uList.UnstructuredContent(), &stsList); err != nil { + return errors.WithStack(err) + } + for i := range stsList.Items { + repoResources.hosts = append(repoResources.hosts, &stsList.Items[i]) + } + case "CronJobList": + var cronList batchv1.CronJobList + if err := runtime.DefaultUnstructuredConverter. + FromUnstructured(uList.UnstructuredContent(), &cronList); err != nil { + return errors.WithStack(err) + } + for i := range cronList.Items { + repoResources.cronjobs = append(repoResources.cronjobs, &cronList.Items[i]) + } case "JobList": var jobList batchv1.JobList if err := runtime.DefaultUnstructuredConverter. @@ -380,6 +473,9 @@ func unstructuredToRepoResources(kind string, repoResources *RepoResources, append(repoResources.manualBackupJobs, &jobList.Items[i]) } } + case "ConfigMapList": + // Repository host now uses mTLS for encryption, authentication, and authorization. + // Configmaps for SSHD are no longer managed here. case "PersistentVolumeClaimList": var pvcList corev1.PersistentVolumeClaimList if err := runtime.DefaultUnstructuredConverter. @@ -389,34 +485,38 @@ func unstructuredToRepoResources(kind string, repoResources *RepoResources, for i := range pvcList.Items { repoResources.pvcs = append(repoResources.pvcs, &pvcList.Items[i]) } - case "StatefulSetList": - var stsList appsv1.StatefulSetList + case "SecretList": + // Repository host now uses mTLS for encryption, authentication, and authorization. + // Secrets for SSHD are no longer managed here. + // TODO(tjmoore4): Consider adding all pgBackRest secrets to RepoResources to + // observe all pgBackRest secrets in one place. + case "ServiceAccountList": + var saList corev1.ServiceAccountList if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &stsList); err != nil { + FromUnstructured(uList.UnstructuredContent(), &saList); err != nil { return errors.WithStack(err) } - for i := range stsList.Items { - repoResources.hosts = append(repoResources.hosts, &stsList.Items[i]) + for i := range saList.Items { + repoResources.sas = append(repoResources.sas, &saList.Items[i]) } - case "CronJobList": - var cronList batchv1.CronJobList + case "RoleList": + var roleList rbacv1.RoleList if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &cronList); err != nil { + FromUnstructured(uList.UnstructuredContent(), &roleList); err != nil { return errors.WithStack(err) } - for i := range cronList.Items { - repoResources.cronjobs = append(repoResources.cronjobs, &cronList.Items[i]) + for i := range roleList.Items { + repoResources.roles = append(repoResources.roles, &roleList.Items[i]) + } + case "RoleBindingList": + var rb rbacv1.RoleBindingList + if err := runtime.DefaultUnstructuredConverter. 
+ FromUnstructured(uList.UnstructuredContent(), &rb); err != nil { + return errors.WithStack(err) + } + for i := range rb.Items { + repoResources.rolebindings = append(repoResources.rolebindings, &rb.Items[i]) } - case "ConfigMapList": - // Repository host now uses mTLS for encryption, authentication, and authorization. - // Configmaps for SSHD are no longer managed here. - // TODO(tjmoore4): Consider adding all pgBackRest configs to RepoResources to - // observe all pgBackRest configs in one place. - case "SecretList": - // Repository host now uses mTLS for encryption, authentication, and authorization. - // Secrets for SSHD are no longer managed here. - // TODO(tjmoore4): Consider adding all pgBackRest secrets to RepoResources to - // observe all pgBackRest secrets in one place. default: return fmt.Errorf("unexpected kind %q", kind) } @@ -449,8 +549,9 @@ func (r *Reconciler) setScheduledJobStatus(ctx context.Context, for _, job := range jobList.Items { // we only care about the scheduled backup Jobs created by the // associated CronJobs - sbs := v1beta1.PGBackRestScheduledBackupStatus{} if job.GetLabels()[naming.LabelPGBackRestCronJob] != "" { + sbs := v1beta1.PGBackRestScheduledBackupStatus{} + if len(job.OwnerReferences) > 0 { sbs.CronJobName = job.OwnerReferences[0].Name } @@ -476,7 +577,7 @@ func (r *Reconciler) setScheduledJobStatus(ctx context.Context, // generateRepoHostIntent creates and populates StatefulSet with the PostgresCluster's full intent // as needed to create and reconcile a pgBackRest dedicated repository host within the kubernetes // cluster. -func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresCluster, +func (r *Reconciler) generateRepoHostIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, repoHostName string, repoResources *RepoResources, observedInstances *observedInstances, ) (*appsv1.StatefulSet, error) { @@ -520,16 +621,12 @@ func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresClu repo.Spec.Template.Spec.Affinity = repoHost.Affinity repo.Spec.Template.Spec.Tolerations = repoHost.Tolerations repo.Spec.Template.Spec.TopologySpreadConstraints = repoHost.TopologySpreadConstraints - if repoHost.PriorityClassName != nil { - repo.Spec.Template.Spec.PriorityClassName = *repoHost.PriorityClassName - } + repo.Spec.Template.Spec.PriorityClassName = initialize.FromPointer(repoHost.PriorityClassName) } // if default pod scheduling is not explicitly disabled, add the default // pod topology spread constraints - if postgresCluster.Spec.DisableDefaultPodScheduling == nil || - (postgresCluster.Spec.DisableDefaultPodScheduling != nil && - !*postgresCluster.Spec.DisableDefaultPodScheduling) { + if !initialize.FromPointer(postgresCluster.Spec.DisableDefaultPodScheduling) { repo.Spec.Template.Spec.TopologySpreadConstraints = append( repo.Spec.Template.Spec.TopologySpreadConstraints, defaultTopologySpreadConstraints( @@ -561,6 +658,14 @@ func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresClu repo.Spec.Replicas = initialize.Int32(1) } + // Use StatefulSet's "RollingUpdate" strategy and "Parallel" policy to roll + // out changes to pods even when not Running or not Ready. 
+ // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#rolling-updates + // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#forced-rollback + // - https://kep.k8s.io/3541 + repo.Spec.PodManagementPolicy = appsv1.ParallelPodManagement + repo.Spec.UpdateStrategy.Type = appsv1.RollingUpdateStatefulSetStrategyType + // Restart containers any time they stop, die, are killed, etc. // - https://docs.k8s.io/concepts/workloads/pods/pod-lifecycle/#restart-policy repo.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways @@ -583,16 +688,18 @@ func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresClu repo.Spec.Template.Spec.SecurityContext = postgres.PodSecurityContext(postgresCluster) - pgbackrest.AddServerToRepoPod(postgresCluster, &repo.Spec.Template.Spec) + pgbackrest.AddServerToRepoPod(ctx, postgresCluster, &repo.Spec.Template.Spec) - // add the init container to make the pgBackRest repo volume log directory - pgbackrest.MakePGBackrestLogDir(&repo.Spec.Template, postgresCluster) + if pgbackrest.RepoHostVolumeDefined(postgresCluster) { + // add the init container to make the pgBackRest repo volume log directory + pgbackrest.MakePGBackrestLogDir(&repo.Spec.Template, postgresCluster) - // add pgBackRest repo volumes to pod - if err := pgbackrest.AddRepoVolumesToPod(postgresCluster, &repo.Spec.Template, - getRepoPVCNames(postgresCluster, repoResources.pvcs), - naming.PGBackRestRepoContainerName); err != nil { - return nil, errors.WithStack(err) + // add pgBackRest repo volumes to pod + if err := pgbackrest.AddRepoVolumesToPod(postgresCluster, &repo.Spec.Template, + getRepoPVCNames(postgresCluster, repoResources.pvcs), + naming.PGBackRestRepoContainerName); err != nil { + return nil, errors.WithStack(err) + } } // add configs to pod pgbackrest.AddConfigToRepoPod(postgresCluster, &repo.Spec.Template.Spec) @@ -662,20 +769,20 @@ func (r *Reconciler) generateRepoVolumeIntent(postgresCluster *v1beta1.PostgresC } // generateBackupJobSpecIntent generates a JobSpec for a pgBackRest backup job -func generateBackupJobSpecIntent(postgresCluster *v1beta1.PostgresCluster, +func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, repo v1beta1.PGBackRestRepo, serviceAccountName string, - labels, annotations map[string]string, opts ...string) (*batchv1.JobSpec, error) { - - selector, containerName, err := getPGBackRestExecSelector(postgresCluster, repo) - if err != nil { - return nil, errors.WithStack(err) - } + labels, annotations map[string]string, opts ...string) *batchv1.JobSpec { repoIndex := regexRepoIndex.FindString(repo.Name) cmdOpts := []string{ "--stanza=" + pgbackrest.DefaultStanzaName, "--repo=" + repoIndex, } + // If VolumeSnapshots are enabled, use archive-copy and archive-check options + if postgresCluster.Spec.Backups.Snapshots != nil && feature.Enabled(ctx, feature.VolumeSnapshots) { + cmdOpts = append(cmdOpts, "--archive-copy=y", "--archive-check=y") + } + cmdOpts = append(cmdOpts, opts...) 
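+	// Example (hypothetical values, assuming pgbackrest.DefaultStanzaName is
+	// "db"): for repo1 with the VolumeSnapshots gate enabled and a scheduled
+	// full backup, cmdOpts at this point holds
+	//
+	//	--stanza=db --repo=1 --archive-copy=y --archive-check=y --type=full
+	//
+	// and is joined with spaces into the COMMAND_OPTS value below.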
container := corev1.Container{ @@ -684,9 +791,9 @@ func generateBackupJobSpecIntent(postgresCluster *v1beta1.PostgresCluster, {Name: "COMMAND", Value: "backup"}, {Name: "COMMAND_OPTS", Value: strings.Join(cmdOpts, " ")}, {Name: "COMPARE_HASH", Value: "true"}, - {Name: "CONTAINER", Value: containerName}, + {Name: "CONTAINER", Value: naming.PGBackRestRepoContainerName}, {Name: "NAMESPACE", Value: postgresCluster.GetNamespace()}, - {Name: "SELECTOR", Value: selector.String()}, + {Name: "SELECTOR", Value: naming.PGBackRestDedicatedSelector(postgresCluster.GetName()).String()}, }, Image: config.PGBackRestContainerImage(postgresCluster), ImagePullPolicy: postgresCluster.Spec.ImagePullPolicy, @@ -726,12 +833,10 @@ func generateBackupJobSpecIntent(postgresCluster *v1beta1.PostgresCluster, // set the priority class name, tolerations, and affinity, if they exist if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil { - if postgresCluster.Spec.Backups.PGBackRest.Jobs.PriorityClassName != nil { - jobSpec.Template.Spec.PriorityClassName = - *postgresCluster.Spec.Backups.PGBackRest.Jobs.PriorityClassName - } jobSpec.Template.Spec.Tolerations = postgresCluster.Spec.Backups.PGBackRest.Jobs.Tolerations jobSpec.Template.Spec.Affinity = postgresCluster.Spec.Backups.PGBackRest.Jobs.Affinity + jobSpec.Template.Spec.PriorityClassName = + initialize.FromPointer(postgresCluster.Spec.Backups.PGBackRest.Jobs.PriorityClassName) } // Set the image pull secrets, if any exist. @@ -741,13 +846,9 @@ func generateBackupJobSpecIntent(postgresCluster *v1beta1.PostgresCluster, jobSpec.Template.Spec.ImagePullSecrets = postgresCluster.Spec.ImagePullSecrets // add pgBackRest configs to template - if containerName == naming.PGBackRestRepoContainerName { - pgbackrest.AddConfigToRepoPod(postgresCluster, &jobSpec.Template.Spec) - } else { - pgbackrest.AddConfigToInstancePod(postgresCluster, &jobSpec.Template.Spec) - } + pgbackrest.AddConfigToRepoPod(postgresCluster, &jobSpec.Template.Spec) - return jobSpec, nil + return jobSpec } // +kubebuilder:rbac:groups="",resources="configmaps",verbs={delete,list} @@ -1095,7 +1196,8 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, // NOTE (andrewlecuyer): Forcing users to put each argument separately might prevent the need // to do any escaping or use eval. 
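+	// An assumption worth noting (not stated in this patch): FetchKeyCommand
+	// is expected to return the cluster's configured encryption key command
+	// when TDE is in use, or an empty string otherwise, so RestoreCommand can
+	// act on it only when one is set.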
- cmd := pgbackrest.RestoreCommand(pgdata, hugePagesSetting, pgtablespaceVolumes, strings.Join(opts, " ")) + cmd := pgbackrest.RestoreCommand(pgdata, hugePagesSetting, config.FetchKeyCommand(&cluster.Spec), + pgtablespaceVolumes, strings.Join(opts, " ")) // create the volume resources required for the postgres data directory dataVolumeMount := postgres.DataVolumeMount() @@ -1226,9 +1328,7 @@ func (r *Reconciler) generateRestoreJobIntent(cluster *v1beta1.PostgresCluster, job.Spec.Template.Spec.SecurityContext = postgres.PodSecurityContext(cluster) // set the priority class name, if it exists - if dataSource.PriorityClassName != nil { - job.Spec.Template.Spec.PriorityClassName = *dataSource.PriorityClassName - } + job.Spec.Template.Spec.PriorityClassName = initialize.FromPointer(dataSource.PriorityClassName) job.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) if err := errors.WithStack(r.setControllerReference(cluster, job)); err != nil { @@ -1246,13 +1346,15 @@ func (r *Reconciler) generateRestoreJobIntent(cluster *v1beta1.PostgresCluster, func (r *Reconciler) reconcilePGBackRest(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, instances *observedInstances, - rootCA *pki.RootCertificateAuthority) (reconcile.Result, error) { + rootCA *pki.RootCertificateAuthority, + backupsSpecFound bool, +) (reconcile.Result, error) { // add some additional context about what component is being reconciled log := logging.FromContext(ctx).WithValues("reconciler", "pgBackRest") - // if nil, create the pgBackRest status that will be updated when reconciling various - // pgBackRest resources + // if nil, create the pgBackRest status that will be updated when + // reconciling various pgBackRest resources if postgresCluster.Status.PGBackRest == nil { postgresCluster.Status.PGBackRest = &v1beta1.PGBackRestStatus{} } @@ -1263,32 +1365,33 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, // Get all currently owned pgBackRest resources in the environment as needed for // reconciliation. This includes deleting resources that should no longer exist per the // current spec (e.g. if repos, repo hosts, etc. have been removed). 
- repoResources, err := r.getPGBackRestResources(ctx, postgresCluster) + repoResources, err := r.getPGBackRestResources(ctx, postgresCluster, backupsSpecFound) if err != nil { // exit early if can't get and clean existing resources as needed to reconcile return reconcile.Result{}, errors.WithStack(err) } + // At this point, reconciliation is allowed, so if no backups spec is found + // clear the status and exit + if !backupsSpecFound { + postgresCluster.Status.PGBackRest = &v1beta1.PGBackRestStatus{} + return result, nil + } + var repoHost *appsv1.StatefulSet var repoHostName string - dedicatedEnabled := pgbackrest.DedicatedRepoHostEnabled(postgresCluster) - if dedicatedEnabled { - // reconcile the pgbackrest repository host - repoHost, err = r.reconcileDedicatedRepoHost(ctx, postgresCluster, repoResources, instances) - if err != nil { - log.Error(err, "unable to reconcile pgBackRest repo host") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) - return result, nil - } - repoHostName = repoHost.GetName() - } else { - // remove the dedicated repo host status if a dedicated host is not enabled - meta.RemoveStatusCondition(&postgresCluster.Status.Conditions, ConditionRepoHostReady) + // reconcile the pgbackrest repository host + repoHost, err = r.reconcileDedicatedRepoHost(ctx, postgresCluster, repoResources, instances) + if err != nil { + log.Error(err, "unable to reconcile pgBackRest repo host") + result.Requeue = true + return result, nil } + repoHostName = repoHost.GetName() if err := r.reconcilePGBackRestSecret(ctx, postgresCluster, repoHost, rootCA); err != nil { log.Error(err, "unable to reconcile pgBackRest secret") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true } // calculate hashes for the external repository configurations in the spec (e.g. for Azure, @@ -1297,7 +1400,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, configHashes, configHash, err := pgbackrest.CalculateConfigHashes(postgresCluster) if err != nil { log.Error(err, "unable to calculate config hashes") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true return result, nil } @@ -1305,7 +1408,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, replicaCreateRepo, err := r.reconcileRepos(ctx, postgresCluster, configHashes, repoResources) if err != nil { log.Error(err, "unable to reconcile pgBackRest repo host") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true return result, nil } @@ -1320,14 +1423,14 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, configHash, naming.ClusterPodService(postgresCluster).Name, postgresCluster.GetNamespace(), instanceNames); err != nil { log.Error(err, "unable to reconcile pgBackRest configuration") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true } // reconcile the RBAC required to run pgBackRest Jobs (e.g. for backups) sa, err := r.reconcilePGBackRestRBAC(ctx, postgresCluster) if err != nil { log.Error(err, "unable to create replica creation backup") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true return result, nil } @@ -1346,14 +1449,14 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, // custom configuration and ensure stanzas are still created). 
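+	// Illustration (hypothetical scenario): changing an external repository's
+	// settings in the spec, such as an S3 bucket or endpoint, produces a new
+	// configHash; the stanza-create logic below uses that hash to detect when
+	// pgBackRest is still running with stale configuration.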
if err != nil { log.Error(err, "unable to create stanza") - result = updateReconcileResult(result, reconcile.Result{RequeueAfter: 10 * time.Second}) + result.RequeueAfter = 10 * time.Second } // If a config hash mismatch, then log an info message and requeue to try again. Add some time // to the requeue to give the pgBackRest configuration changes a chance to propagate to the // container. if configHashMismatch { log.Info("pgBackRest config hash mismatch detected, requeuing to reattempt stanza create") - result = updateReconcileResult(result, reconcile.Result{RequeueAfter: 10 * time.Second}) + result.RequeueAfter = 10 * time.Second } // reconcile the pgBackRest backup CronJobs requeue := r.reconcileScheduledBackups(ctx, postgresCluster, sa, repoResources.cronjobs) @@ -1364,7 +1467,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, // A potential option to handle this proactively would be to use a webhook: // https://book.kubebuilder.io/cronjob-tutorial/webhook-implementation.html if requeue { - result = updateReconcileResult(result, reconcile.Result{RequeueAfter: 10 * time.Second}) + result.RequeueAfter = 10 * time.Second } // Reconcile the initial backup that is needed to enable replica creation using pgBackRest. @@ -1372,7 +1475,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, if err := r.reconcileReplicaCreateBackup(ctx, postgresCluster, instances, repoResources.replicaCreateBackupJobs, sa, configHash, replicaCreateRepo); err != nil { log.Error(err, "unable to reconcile replica creation backup") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true } // Reconcile a manual backup as defined in the spec, and triggered by the end-user via @@ -1380,7 +1483,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, if err := r.reconcileManualBackup(ctx, postgresCluster, repoResources.manualBackupJobs, sa, instances); err != nil { log.Error(err, "unable to reconcile manual backup") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true } return result, nil @@ -1395,7 +1498,9 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, cluster *v1beta1.PostgresCluster, dataSource *v1beta1.PostgresClusterDataSource, configHash string, clusterVolumes []corev1.PersistentVolumeClaim, - rootCA *pki.RootCertificateAuthority) error { + rootCA *pki.RootCertificateAuthority, + backupsSpecFound bool, +) error { // grab cluster, namespaces and repo name information from the data source sourceClusterName := dataSource.ClusterName @@ -1477,7 +1582,7 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, // Note that function reconcilePGBackRest only uses forCluster in observedInstances. 
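+	// A reading of the note above (inference, not from the patch): during a
+	// restore no real instances exist yet, so a single placeholder Instance
+	// is supplied in forCluster purely so reconcilePGBackRest has something
+	// to observe.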
result, err := r.reconcilePGBackRest(ctx, cluster, &observedInstances{ forCluster: []*Instance{instance}, - }, rootCA) + }, rootCA, backupsSpecFound) if err != nil || result != (reconcile.Result{}) { return fmt.Errorf("unable to reconcile pgBackRest as needed to initialize "+ "PostgreSQL data for the cluster: %w", err) @@ -1523,7 +1628,7 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, Namespace: cluster.GetNamespace(), }} // Reconcile the PGDATA and WAL volumes for the restore - pgdata, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes) + pgdata, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes, sourceCluster) if err != nil { return errors.WithStack(err) } @@ -1537,6 +1642,9 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, return errors.WithStack(err) } + // TODO(snapshots): If pgdata is being sourced by a VolumeSnapshot then don't perform a typical restore job; + // we only want to replay the WAL. + // reconcile the pgBackRest restore Job to populate the cluster's data directory if err := r.reconcileRestoreJob(ctx, cluster, sourceCluster, pgdata, pgwal, pgtablespaces, dataSource, instanceName, instanceSetName, configHash, pgbackrest.DefaultStanzaName); err != nil { @@ -1618,7 +1726,7 @@ func (r *Reconciler) reconcileCloudBasedDataSource(ctx context.Context, Namespace: cluster.GetNamespace(), }} // Reconcile the PGDATA and WAL volumes for the restore - pgdata, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes) + pgdata, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes, nil) if err != nil { return errors.WithStack(err) } @@ -1883,8 +1991,6 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, repoHostName, configHash, serviceName, serviceNamespace string, instanceNames []string) error { - log := logging.FromContext(ctx).WithValues("reconcileResource", "repoConfig") - backrestConfig := pgbackrest.CreatePGBackRestConfigMapIntent(postgresCluster, repoHostName, configHash, serviceName, serviceNamespace, instanceNames) if err := controllerutil.SetControllerReference(postgresCluster, backrestConfig, @@ -1895,12 +2001,6 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, return errors.WithStack(err) } - repoHostConfigured := pgbackrest.DedicatedRepoHostEnabled(postgresCluster) - if !repoHostConfigured { - log.V(1).Info("skipping SSH reconciliation, no repo hosts configured") - return nil - } - return nil } @@ -2159,7 +2259,7 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, // // TODO (andrewlecuyer): Since reconciliation doesn't currently occur when a leader is elected, // the operator may not get another chance to create the backup if a writable instance is not - // detected, and it then returns without requeing. To ensure this doesn't occur and that the + // detected, and it then returns without requeuing. To ensure this doesn't occur and that the // operator always has a chance to reconcile when an instance becomes writable, we should watch // Pods in the cluster for leader election events, and trigger reconciles accordingly. 
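+	// Example request (annotation key assumed from this operator's naming
+	// conventions; any new, non-empty value such as a timestamp requests a
+	// backup):
+	//
+	//	kubectl annotate postgrescluster hippo \
+	//	  postgres-operator.crunchydata.com/pgbackrest-backup="$(date)"
+	//
+	// manualAnnotation below carries that value and is empty when no manual
+	// backup has been requested.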
if !clusterWritable || manualAnnotation == "" || @@ -2187,20 +2287,18 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, return nil } - // determine if the dedicated repository host is ready (if enabled) using the repo host ready + // determine if the dedicated repository host is ready using the repo host ready // condition, and return if not - if pgbackrest.DedicatedRepoHostEnabled(postgresCluster) { - condition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) - if condition == nil || condition.Status != metav1.ConditionTrue { - return nil - } + repoCondition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) + if repoCondition == nil || repoCondition.Status != metav1.ConditionTrue { + return nil } // Determine if the replica create backup is complete and return if not. This allows for proper // orchestration of backup Jobs since only one backup can be run at a time. - condition := meta.FindStatusCondition(postgresCluster.Status.Conditions, + backupCondition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionReplicaCreate) - if condition == nil || condition.Status != metav1.ConditionTrue { + if backupCondition == nil || backupCondition.Status != metav1.ConditionTrue { return nil } @@ -2275,11 +2373,9 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, backupJob.ObjectMeta.Labels = labels backupJob.ObjectMeta.Annotations = annotations - spec, err := generateBackupJobSpecIntent(postgresCluster, repo, + spec := generateBackupJobSpecIntent(ctx, postgresCluster, repo, serviceAccount.GetName(), labels, annotations, backupOpts...) - if err != nil { - return errors.WithStack(err) - } + backupJob.Spec = *spec // set gvk and ownership refs @@ -2353,7 +2449,7 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, // // TODO (andrewlecuyer): Since reconciliation doesn't currently occur when a leader is elected, // the operator may not get another chance to create the backup if a writable instance is not - // detected, and it then returns without requeing. To ensure this doesn't occur and that the + // detected, and it then returns without requeuing. To ensure this doesn't occur and that the // operator always has a chance to reconcile when an instance becomes writable, we should watch // Pods in the cluster for leader election events, and trigger reconciles accordingly. if !clusterWritable || replicaCreateRepoStatus == nil || replicaCreateRepoStatus.ReplicaCreateBackupComplete { @@ -2367,13 +2463,6 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, replicaRepoReady = (condition.Status == metav1.ConditionTrue) } - // get pod name and container name as needed to exec into the proper pod and create - // the pgBackRest backup - _, containerName, err := getPGBackRestExecSelector(postgresCluster, replicaCreateRepo) - if err != nil { - return errors.WithStack(err) - } - // determine if the dedicated repository host is ready using the repo host ready status var dedicatedRepoReady bool condition = meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) @@ -2400,14 +2489,10 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, // - The job has failed. The Job will be deleted and recreated to try again. // - The replica creation repo has changed since the Job was created. Delete and recreate // with the Job with the proper repo configured. 
- // - The "config" annotation has changed, indicating there is a new primary. Delete and - // recreate the Job with the proper config mounted (applicable when a dedicated repo - // host is not enabled). // - The "config hash" annotation has changed, indicating a configuration change has been // made in the spec (specifically a change to the config for an external repo). Delete // and recreate the Job with proper hash per the current config. if failed || replicaCreateRepoChanged || - (job.GetAnnotations()[naming.PGBackRestCurrentConfig] != containerName) || (job.GetAnnotations()[naming.PGBackRestConfigHash] != configHash) { if err := r.Client.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { @@ -2423,10 +2508,9 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, } } - dedicatedEnabled := pgbackrest.DedicatedRepoHostEnabled(postgresCluster) - // return if no job has been created and the replica repo or the dedicated repo host is not - // ready - if job == nil && ((dedicatedEnabled && !dedicatedRepoReady) || !replicaRepoReady) { + // return if no job has been created and the replica repo or the dedicated + // repo host is not ready + if job == nil && (!dedicatedRepoReady || !replicaRepoReady) { return nil } @@ -2445,17 +2529,14 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, annotations = naming.Merge(postgresCluster.Spec.Metadata.GetAnnotationsOrNil(), postgresCluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil(), map[string]string{ - naming.PGBackRestCurrentConfig: containerName, - naming.PGBackRestConfigHash: configHash, + naming.PGBackRestConfigHash: configHash, }) backupJob.ObjectMeta.Labels = labels backupJob.ObjectMeta.Annotations = annotations - spec, err := generateBackupJobSpecIntent(postgresCluster, replicaCreateRepo, + spec := generateBackupJobSpecIntent(ctx, postgresCluster, replicaCreateRepo, serviceAccount.GetName(), labels, annotations) - if err != nil { - return errors.WithStack(err) - } + backupJob.Spec = *spec // set gvk and ownership refs @@ -2593,7 +2674,7 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, // // TODO (andrewlecuyer): Since reconciliation doesn't currently occur when a leader is elected, // the operator may not get another chance to create the stanza if a writable instance is not - // detected, and it then returns without requeing. To ensure this doesn't occur and that the + // detected, and it then returns without requeuing. To ensure this doesn't occur and that the // operator always has a chance to reconcile when an instance becomes writable, we should watch // Pods in the cluster for leader election events, and trigger reconciles accordingly. if !clusterWritable || stanzasCreated { @@ -2603,13 +2684,12 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, // create a pgBackRest executor and attempt stanza creation exec := func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(postgresCluster.GetNamespace(), writableInstanceName, + return r.PodExec(ctx, postgresCluster.GetNamespace(), writableInstanceName, naming.ContainerDatabase, stdin, stdout, stderr, command...) 
} // Always attempt to create pgBackRest stanza first - configHashMismatch, err := pgbackrest.Executor(exec).StanzaCreateOrUpgrade(ctx, configHash, - false) + configHashMismatch, err := pgbackrest.Executor(exec).StanzaCreateOrUpgrade(ctx, configHash, postgresCluster) if err != nil { // record and log any errors resulting from running the stanza-create command r.Recorder.Event(postgresCluster, corev1.EventTypeWarning, EventUnableToCreateStanzas, @@ -2637,29 +2717,8 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, return false, nil } -// getPGBackRestExecSelector returns a selector and container name that allows the proper -// Pod (along with a specific container within it) to be found within the Kubernetes -// cluster as needed to exec into the container and run a pgBackRest command. -func getPGBackRestExecSelector(postgresCluster *v1beta1.PostgresCluster, - repo v1beta1.PGBackRestRepo) (labels.Selector, string, error) { - - var err error - var podSelector labels.Selector - var containerName string - - if repo.Volume != nil { - podSelector = naming.PGBackRestDedicatedSelector(postgresCluster.GetName()) - containerName = naming.PGBackRestRepoContainerName - } else { - podSelector, err = naming.AsSelector(naming.ClusterPrimary(postgresCluster.GetName())) - containerName = naming.ContainerDatabase - } - - return podSelector, containerName, err -} - -// getRepoHostStatus is responsible for returning the pgBackRest status for the provided pgBackRest -// repository host +// getRepoHostStatus is responsible for returning the pgBackRest status for the +// provided pgBackRest repository host func getRepoHostStatus(repoHost *appsv1.StatefulSet) *v1beta1.RepoHostStatus { repoHostStatus := &v1beta1.RepoHostStatus{} @@ -2838,8 +2897,7 @@ func (r *Reconciler) reconcilePGBackRestCronJob( labels := naming.Merge( cluster.Spec.Metadata.GetLabelsOrNil(), cluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), - naming.PGBackRestCronJobLabels(cluster.Name, repo.Name, backupType), - ) + naming.PGBackRestCronJobLabels(cluster.Name, repo.Name, backupType)) objectmeta := naming.PGBackRestCronJob(cluster, backupType, repo.Name) // Look for an existing CronJob by the associated Labels. If one exists, @@ -2903,11 +2961,8 @@ func (r *Reconciler) reconcilePGBackRestCronJob( // set backup type (i.e. "full", "diff", "incr") backupOpts := []string{"--type=" + backupType} - jobSpec, err := generateBackupJobSpecIntent(cluster, repo, + jobSpec := generateBackupJobSpecIntent(ctx, cluster, repo, serviceAccount.GetName(), labels, annotations, backupOpts...) - if err != nil { - return errors.WithStack(err) - } // Suspend cronjobs when shutdown or read-only. Any jobs that have already // started will continue. @@ -2940,7 +2995,7 @@ func (r *Reconciler) reconcilePGBackRestCronJob( // set metadata pgBackRestCronJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("CronJob")) - err = errors.WithStack(r.setControllerReference(cluster, pgBackRestCronJob)) + err := errors.WithStack(r.setControllerReference(cluster, pgBackRestCronJob)) if err == nil { err = r.apply(ctx, pgBackRestCronJob) @@ -2953,3 +3008,94 @@ func (r *Reconciler) reconcilePGBackRestCronJob( } return err } + +// BackupsEnabled checks the state of the backups (i.e., if backups are in the spec, +// if a repo-host StatefulSet exists, if the annotation permitting backup deletion exists) +// and determines whether reconciliation is allowed. 
+// Reconciliation of backup-related Kubernetes objects is paused when all of
+// the following are true:
+// - a user created a cluster with backups;
+// - the cluster is updated to remove backups;
+// - the annotation authorizing that removal is missing.
+//
+// This function also returns whether the spec has backups defined.
+func (r *Reconciler) BackupsEnabled(
+	ctx context.Context,
+	postgresCluster *v1beta1.PostgresCluster,
+) (
+	backupsSpecFound bool,
+	backupsReconciliationAllowed bool,
+	err error,
+) {
+	specFound, stsNotFound, annotationFound, err := r.ObserveBackupUniverse(ctx, postgresCluster)
+
+	switch {
+	case err != nil:
+	case specFound:
+		backupsSpecFound = true
+		backupsReconciliationAllowed = true
+	case annotationFound || stsNotFound:
+		backupsReconciliationAllowed = true
+	case !annotationFound && !stsNotFound:
+		// Destroying backups is a two-key operation:
+		// 1. You must remove the backups section of the spec.
+		// 2. You must apply an annotation to the cluster.
+		// The existence of a StatefulSet without the backups spec is
+		// evidence of key 1 being turned without key 2 being turned
+		// -- block reconciliation until the annotation is added.
+		backupsReconciliationAllowed = false
+	default:
+		backupsReconciliationAllowed = false
+	}
+	return backupsSpecFound, backupsReconciliationAllowed, err
+}
+
+// ObserveBackupUniverse returns
+// - whether the spec has backups defined;
+// - whether the repo-host StatefulSet exists;
+// - whether the cluster has the annotation authorizing backup removal.
+func (r *Reconciler) ObserveBackupUniverse(ctx context.Context,
+	postgresCluster *v1beta1.PostgresCluster,
+) (
+	backupsSpecFound bool,
+	repoHostStatefulSetNotFound bool,
+	backupsRemovalAnnotationFound bool,
+	err error,
+) {
+
+	// Does the cluster have a blank Backups section?
+	backupsSpecFound = !reflect.DeepEqual(postgresCluster.Spec.Backups, v1beta1.Backups{PGBackRest: v1beta1.PGBackRestArchive{}})
+
+	// Does the repo-host StatefulSet exist?
+	name := fmt.Sprintf("%s-%s", postgresCluster.GetName(), "repo-host")
+	existing := &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: postgresCluster.Namespace,
+			Name:      name,
+		},
+	}
+	err = errors.WithStack(
+		r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))
+	repoHostStatefulSetNotFound = apierrors.IsNotFound(err)
+
+	// If we have an error that is not related to a missing repo-host StatefulSet,
+	// we return an error and expect the calling function to correctly stop processing.
+ if err != nil && !repoHostStatefulSetNotFound { + return true, false, false, err + } + + backupsRemovalAnnotationFound = authorizeBackupRemovalAnnotationPresent(postgresCluster) + + // If we have reached this point, the err is either nil or an IsNotFound error + // which we do not care about; hence, pass nil rather than the err + return backupsSpecFound, repoHostStatefulSetNotFound, backupsRemovalAnnotationFound, nil +} + +func authorizeBackupRemovalAnnotationPresent(postgresCluster *v1beta1.PostgresCluster) bool { + annotations := postgresCluster.GetAnnotations() + for annotation := range annotations { + if annotation == naming.AuthorizeBackupRemovalAnnotation { + return annotations[naming.AuthorizeBackupRemovalAnnotation] == "true" + } + } + return false +} diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index a366627dfd..8e34dabb5e 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -1,23 +1,9 @@ -//go:build envtest -// +build envtest +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - import ( "context" "errors" @@ -55,6 +41,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pgbackrest" "github.com/crunchydata/postgres-operator/internal/pki" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -81,7 +68,7 @@ func fakePostgresCluster(clusterName, namespace, clusterUID string, Name: "instance1", DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -132,7 +119,7 @@ func fakePostgresCluster(clusterName, namespace, clusterUID string, Volume: &v1beta1.RepoPVC{ VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -186,11 +173,11 @@ func TestReconcilePGBackRest(t *testing.T) { t.Skip("USE_EXISTING_CLUSTER: Test fails due to garbage collection") } - tEnv, tClient := setupKubernetes(t) + cfg, tClient := setupKubernetes(t) require.ParallelCapacity(t, 2) r := &Reconciler{} - ctx, cancel := setupManager(t, tEnv.Config, func(mgr manager.Manager) { + ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(ControllerName), @@ -200,137 +187,137 @@ func TestReconcilePGBackRest(t *testing.T) { }) t.Cleanup(func() { teardownManager(cancel, t) }) - clusterName := "hippocluster" - clusterUID := "hippouid" - - ns := setupNamespace(t, tClient) + t.Run("run reconcile with backups defined", func(t *testing.T) { + clusterName := "hippocluster" + clusterUID := "hippouid" - // create a PostgresCluster to test with - postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + ns := setupNamespace(t, tClient) + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) - // create a service account to test with - serviceAccount, err := r.reconcilePGBackRestRBAC(ctx, postgresCluster) - assert.NilError(t, err) - assert.Assert(t, serviceAccount != nil) + // create a service account to test with + serviceAccount, err := r.reconcilePGBackRestRBAC(ctx, postgresCluster) + assert.NilError(t, err) + assert.Assert(t, serviceAccount != nil) - // create the 'observed' instances and set the leader - instances := &observedInstances{ - forCluster: []*Instance{{Name: "instance1", - Pods: []*corev1.Pod{{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, - }, - Spec: corev1.PodSpec{}, - }}, - }, {Name: "instance2"}, {Name: "instance3"}}, - } + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, + } - // set status - 
postgresCluster.Status = v1beta1.PostgresClusterStatus{ - Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, - PGBackRest: &v1beta1.PGBackRestStatus{ - RepoHost: &v1beta1.RepoHostStatus{Ready: true}, - Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, - } + // set status + postgresCluster.Status = v1beta1.PostgresClusterStatus{ + Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + PGBackRest: &v1beta1.PGBackRestStatus{ + RepoHost: &v1beta1.RepoHostStatus{Ready: true}, + Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, + } - // set conditions - clusterConditions := map[string]metav1.ConditionStatus{ - ConditionRepoHostReady: metav1.ConditionTrue, - ConditionReplicaCreate: metav1.ConditionTrue, - } - for condition, status := range clusterConditions { - meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ - Type: condition, Reason: "testing", Status: status}) - } + // set conditions + clusterConditions := map[string]metav1.ConditionStatus{ + ConditionRepoHostReady: metav1.ConditionTrue, + ConditionReplicaCreate: metav1.ConditionTrue, + } + for condition, status := range clusterConditions { + meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ + Type: condition, Reason: "testing", Status: status}) + } - rootCA, err := pki.NewRootCertificateAuthority() - assert.NilError(t, err) + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) - result, err := r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA) - if err != nil || result != (reconcile.Result{}) { - t.Errorf("unable to reconcile pgBackRest: %v", err) - } + result, err := r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, true) + if err != nil || result != (reconcile.Result{}) { + t.Errorf("unable to reconcile pgBackRest: %v", err) + } - // repo is the first defined repo - repo := postgresCluster.Spec.Backups.PGBackRest.Repos[0] + // repo is the first defined repo + repo := postgresCluster.Spec.Backups.PGBackRest.Repos[0] - // test that the repo was created properly - t.Run("verify pgbackrest dedicated repo StatefulSet", func(t *testing.T) { + // test that the repo was created properly + t.Run("verify pgbackrest dedicated repo StatefulSet", func(t *testing.T) { - // get the pgBackRest repo sts using the labels we expect it to have - dedicatedRepos := &appsv1.StatefulSetList{} - if err := tClient.List(ctx, dedicatedRepos, client.InNamespace(ns.Name), - client.MatchingLabels{ - naming.LabelCluster: clusterName, - naming.LabelPGBackRest: "", - naming.LabelPGBackRestDedicated: "", - }); err != nil { - t.Fatal(err) - } + // get the pgBackRest repo sts using the labels we expect it to have + dedicatedRepos := &appsv1.StatefulSetList{} + if err := tClient.List(ctx, dedicatedRepos, client.InNamespace(ns.Name), + client.MatchingLabels{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestDedicated: "", + }); err != nil { + t.Fatal(err) + } - repo := appsv1.StatefulSet{} - // verify that we found a repo sts as expected - if len(dedicatedRepos.Items) == 0 { - t.Fatal("Did not find a dedicated repo sts") - } else if len(dedicatedRepos.Items) > 1 { - t.Fatal("Too many dedicated repo sts's found") - } else { - repo = dedicatedRepos.Items[0] - } + repo := appsv1.StatefulSet{} + // verify that we found a repo sts as expected + if len(dedicatedRepos.Items) == 0 { + t.Fatal("Did not find a dedicated repo sts") + } else if len(dedicatedRepos.Items) > 1 { + 
t.Fatal("Too many dedicated repo sts's found") + } else { + repo = dedicatedRepos.Items[0] + } - // verify proper number of replicas - if *repo.Spec.Replicas != 1 { - t.Errorf("%v replicas found for dedicated repo sts, expected %v", - repo.Spec.Replicas, 1) - } + // verify proper number of replicas + if *repo.Spec.Replicas != 1 { + t.Errorf("%v replicas found for dedicated repo sts, expected %v", + repo.Spec.Replicas, 1) + } - // verify proper ownership - var foundOwnershipRef bool - for _, r := range repo.GetOwnerReferences() { - if r.Kind == "PostgresCluster" && r.Name == clusterName && - r.UID == types.UID(clusterUID) { + // verify proper ownership + var foundOwnershipRef bool + for _, r := range repo.GetOwnerReferences() { + if r.Kind == "PostgresCluster" && r.Name == clusterName && + r.UID == types.UID(clusterUID) { - foundOwnershipRef = true - break + foundOwnershipRef = true + break + } } - } - if !foundOwnershipRef { - t.Errorf("did not find expected ownership references") - } + if !foundOwnershipRef { + t.Errorf("did not find expected ownership references") + } - // verify proper matching labels - expectedLabels := map[string]string{ - naming.LabelCluster: clusterName, - naming.LabelPGBackRest: "", - naming.LabelPGBackRestDedicated: "", - } - expectedLabelsSelector, err := metav1.LabelSelectorAsSelector( - metav1.SetAsLabelSelector(expectedLabels)) - if err != nil { - t.Error(err) - } - if !expectedLabelsSelector.Matches(labels.Set(repo.GetLabels())) { - t.Errorf("dedicated repo host is missing an expected label: found=%v, expected=%v", - repo.GetLabels(), expectedLabels) - } + // verify proper matching labels + expectedLabels := map[string]string{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestDedicated: "", + } + expectedLabelsSelector, err := metav1.LabelSelectorAsSelector( + metav1.SetAsLabelSelector(expectedLabels)) + if err != nil { + t.Error(err) + } + if !expectedLabelsSelector.Matches(labels.Set(repo.GetLabels())) { + t.Errorf("dedicated repo host is missing an expected label: found=%v, expected=%v", + repo.GetLabels(), expectedLabels) + } - template := repo.Spec.Template.DeepCopy() + template := repo.Spec.Template.DeepCopy() - // Containers and Volumes should be populated. - assert.Assert(t, len(template.Spec.Containers) != 0) - assert.Assert(t, len(template.Spec.InitContainers) != 0) - assert.Assert(t, len(template.Spec.Volumes) != 0) + // Containers and Volumes should be populated. + assert.Assert(t, len(template.Spec.Containers) != 0) + assert.Assert(t, len(template.Spec.InitContainers) != 0) + assert.Assert(t, len(template.Spec.Volumes) != 0) - // Ignore Containers and Volumes in the comparison below. - template.Spec.Containers = nil - template.Spec.InitContainers = nil - template.Spec.Volumes = nil + // Ignore Containers and Volumes in the comparison below. + template.Spec.Containers = nil + template.Spec.InitContainers = nil + template.Spec.Volumes = nil - // TODO(tjmoore4): Add additional tests to test appending existing - // topology spread constraints and spec.disableDefaultPodScheduling being - // set to true (as done in instance StatefulSet tests). - assert.Assert(t, marshalMatches(template.Spec, ` + // TODO(tjmoore4): Add additional tests to test appending existing + // topology spread constraints and spec.disableDefaultPodScheduling being + // set to true (as done in instance StatefulSet tests). 
+ assert.Assert(t, cmp.MarshalMatches(template.Spec, ` affinity: {} automountServiceAccountToken: false containers: null @@ -384,230 +371,298 @@ topologySpreadConstraints: maxSkew: 1 topologyKey: topology.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway - `)) + `)) - // verify that the repohost container exists and contains the proper env vars - var repoHostContExists bool - for _, c := range repo.Spec.Template.Spec.Containers { - if c.Name == naming.PGBackRestRepoContainerName { - repoHostContExists = true + // verify that the repohost container exists and contains the proper env vars + var repoHostContExists bool + for _, c := range repo.Spec.Template.Spec.Containers { + if c.Name == naming.PGBackRestRepoContainerName { + repoHostContExists = true + } } - } - // now verify the proper env within the container - if !repoHostContExists { - t.Errorf("dedicated repo host is missing a container with name %s", - naming.PGBackRestRepoContainerName) - } - - repoHostStatus := postgresCluster.Status.PGBackRest.RepoHost - if repoHostStatus != nil { - if repoHostStatus.APIVersion != "apps/v1" || repoHostStatus.Kind != "StatefulSet" { - t.Errorf("invalid version/kind for dedicated repo host status") + // now verify the proper env within the container + if !repoHostContExists { + t.Errorf("dedicated repo host is missing a container with name %s", + naming.PGBackRestRepoContainerName) } - } else { - t.Errorf("dedicated repo host status is missing") - } - var foundConditionRepoHostsReady bool - for _, c := range postgresCluster.Status.Conditions { - if c.Type == "PGBackRestRepoHostReady" { - foundConditionRepoHostsReady = true - break + repoHostStatus := postgresCluster.Status.PGBackRest.RepoHost + if repoHostStatus != nil { + if repoHostStatus.APIVersion != "apps/v1" || repoHostStatus.Kind != "StatefulSet" { + t.Errorf("invalid version/kind for dedicated repo host status") + } + } else { + t.Errorf("dedicated repo host status is missing") } - } - if !foundConditionRepoHostsReady { - t.Errorf("status condition PGBackRestRepoHostsReady is missing") - } - events := &corev1.EventList{} - if err := wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": ns.Name, - "involvedObject.uid": clusterUID, - "reason": "RepoHostCreated", - }); err != nil { - return false, err + var foundConditionRepoHostsReady bool + for _, c := range postgresCluster.Status.Conditions { + if c.Type == "PGBackRestRepoHostReady" { + foundConditionRepoHostsReady = true + break + } } - if len(events.Items) != 1 { - return false, nil + if !foundConditionRepoHostsReady { + t.Errorf("status condition PGBackRestRepoHostsReady is missing") } - return true, nil - }); err != nil { - t.Error(err) - } - }) - - t.Run("verify pgbackrest repo volumes", func(t *testing.T) { - // get the pgBackRest repo sts using the labels we expect it to have - repoVols := &corev1.PersistentVolumeClaimList{} - if err := tClient.List(ctx, repoVols, client.InNamespace(ns.Name), - client.MatchingLabels{ - naming.LabelCluster: clusterName, - naming.LabelPGBackRest: "", - naming.LabelPGBackRestRepoVolume: "", - }); err != nil { - t.Fatal(err) - } - assert.Assert(t, len(repoVols.Items) > 0) + assert.Check(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := 
tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.Name, + "involvedObject.uid": clusterUID, + "reason": "RepoHostCreated", + }) + return len(events.Items) == 1, err + })) + }) - for _, r := range postgresCluster.Spec.Backups.PGBackRest.Repos { - if r.Volume == nil { - continue + t.Run("verify pgbackrest repo volumes", func(t *testing.T) { + + // get the pgBackRest repo sts using the labels we expect it to have + repoVols := &corev1.PersistentVolumeClaimList{} + if err := tClient.List(ctx, repoVols, client.InNamespace(ns.Name), + client.MatchingLabels{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestRepoVolume: "", + }); err != nil { + t.Fatal(err) } - var foundRepoVol bool - for _, v := range repoVols.Items { - if v.GetName() == - naming.PGBackRestRepoVolume(postgresCluster, r.Name).Name { - foundRepoVol = true - break + assert.Assert(t, len(repoVols.Items) > 0) + + for _, r := range postgresCluster.Spec.Backups.PGBackRest.Repos { + if r.Volume == nil { + continue } + var foundRepoVol bool + for _, v := range repoVols.Items { + if v.GetName() == + naming.PGBackRestRepoVolume(postgresCluster, r.Name).Name { + foundRepoVol = true + break + } + } + assert.Assert(t, foundRepoVol) } - assert.Assert(t, foundRepoVol) - } - }) + }) - t.Run("verify pgbackrest configuration", func(t *testing.T) { + t.Run("verify pgbackrest configuration", func(t *testing.T) { - config := &corev1.ConfigMap{} - if err := tClient.Get(ctx, types.NamespacedName{ - Name: naming.PGBackRestConfig(postgresCluster).Name, - Namespace: postgresCluster.GetNamespace(), - }, config); err != nil { - assert.NilError(t, err) - } - assert.Assert(t, len(config.Data) > 0) - - var instanceConfFound, dedicatedRepoConfFound bool - for k, v := range config.Data { - if v != "" { - if k == pgbackrest.CMInstanceKey { - instanceConfFound = true - } else if k == pgbackrest.CMRepoKey { - dedicatedRepoConfFound = true + config := &corev1.ConfigMap{} + if err := tClient.Get(ctx, types.NamespacedName{ + Name: naming.PGBackRestConfig(postgresCluster).Name, + Namespace: postgresCluster.GetNamespace(), + }, config); err != nil { + assert.NilError(t, err) + } + assert.Assert(t, len(config.Data) > 0) + + var instanceConfFound, dedicatedRepoConfFound bool + for k, v := range config.Data { + if v != "" { + if k == pgbackrest.CMInstanceKey { + instanceConfFound = true + } else if k == pgbackrest.CMRepoKey { + dedicatedRepoConfFound = true + } } } - } - assert.Check(t, instanceConfFound) - assert.Check(t, dedicatedRepoConfFound) - }) + assert.Check(t, instanceConfFound) + assert.Check(t, dedicatedRepoConfFound) + }) - t.Run("verify pgbackrest schedule cronjob", func(t *testing.T) { + t.Run("verify pgbackrest schedule cronjob", func(t *testing.T) { - // set status - postgresCluster.Status = v1beta1.PostgresClusterStatus{ - Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, - PGBackRest: &v1beta1.PGBackRestStatus{ - Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, - } + // set status + postgresCluster.Status = v1beta1.PostgresClusterStatus{ + Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + PGBackRest: &v1beta1.PGBackRestStatus{ + Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, + } - // set conditions - clusterConditions := map[string]metav1.ConditionStatus{ - ConditionRepoHostReady: metav1.ConditionTrue, - ConditionReplicaCreate: 
metav1.ConditionTrue, - } + // set conditions + clusterConditions := map[string]metav1.ConditionStatus{ + ConditionRepoHostReady: metav1.ConditionTrue, + ConditionReplicaCreate: metav1.ConditionTrue, + } - for condition, status := range clusterConditions { - meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ - Type: condition, Reason: "testing", Status: status}) - } + for condition, status := range clusterConditions { + meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ + Type: condition, Reason: "testing", Status: status}) + } - requeue := r.reconcileScheduledBackups(ctx, postgresCluster, serviceAccount, fakeObservedCronJobs()) - assert.Assert(t, !requeue) + requeue := r.reconcileScheduledBackups(ctx, postgresCluster, serviceAccount, fakeObservedCronJobs()) + assert.Assert(t, !requeue) - returnedCronJob := &batchv1.CronJob{} - if err := tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-repo1-full", - Namespace: postgresCluster.GetNamespace(), - }, returnedCronJob); err != nil { - assert.NilError(t, err) - } + returnedCronJob := &batchv1.CronJob{} + if err := tClient.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name + "-repo1-full", + Namespace: postgresCluster.GetNamespace(), + }, returnedCronJob); err != nil { + assert.NilError(t, err) + } - // check returned cronjob matches set spec - assert.Equal(t, returnedCronJob.Name, "hippocluster-repo1-full") - assert.Equal(t, returnedCronJob.Spec.Schedule, testCronSchedule) - assert.Equal(t, returnedCronJob.Spec.ConcurrencyPolicy, batchv1.ForbidConcurrent) - assert.Equal(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Name, - "pgbackrest") - assert.Assert(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].SecurityContext != &corev1.SecurityContext{}) + // check returned cronjob matches set spec + assert.Equal(t, returnedCronJob.Name, "hippocluster-repo1-full") + assert.Equal(t, returnedCronJob.Spec.Schedule, testCronSchedule) + assert.Equal(t, returnedCronJob.Spec.ConcurrencyPolicy, batchv1.ForbidConcurrent) + assert.Equal(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Name, + "pgbackrest") + assert.Assert(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].SecurityContext != &corev1.SecurityContext{}) - }) + }) - t.Run("verify pgbackrest schedule found", func(t *testing.T) { + t.Run("verify pgbackrest schedule found", func(t *testing.T) { - assert.Assert(t, backupScheduleFound(repo, "full")) + assert.Assert(t, backupScheduleFound(repo, "full")) - testrepo := v1beta1.PGBackRestRepo{ - Name: "repo1", - BackupSchedules: &v1beta1.PGBackRestBackupSchedules{ - Full: &testCronSchedule, - Differential: &testCronSchedule, - Incremental: &testCronSchedule, - }} + testrepo := v1beta1.PGBackRestRepo{ + Name: "repo1", + BackupSchedules: &v1beta1.PGBackRestBackupSchedules{ + Full: &testCronSchedule, + Differential: &testCronSchedule, + Incremental: &testCronSchedule, + }} - assert.Assert(t, backupScheduleFound(testrepo, "full")) - assert.Assert(t, backupScheduleFound(testrepo, "diff")) - assert.Assert(t, backupScheduleFound(testrepo, "incr")) + assert.Assert(t, backupScheduleFound(testrepo, "full")) + assert.Assert(t, backupScheduleFound(testrepo, "diff")) + assert.Assert(t, backupScheduleFound(testrepo, "incr")) - }) + }) + + t.Run("verify pgbackrest schedule not found", func(t *testing.T) { + + assert.Assert(t, !backupScheduleFound(repo, "notabackuptype")) + + noscheduletestrepo := 
v1beta1.PGBackRestRepo{Name: "repo1"} + assert.Assert(t, !backupScheduleFound(noscheduletestrepo, "full")) + + }) - t.Run("verify pgbackrest schedule not found", func(t *testing.T) { + t.Run("pgbackrest schedule suspended status", func(t *testing.T) { - assert.Assert(t, !backupScheduleFound(repo, "notabackuptype")) + returnedCronJob := &batchv1.CronJob{} + if err := tClient.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name + "-repo1-full", + Namespace: postgresCluster.GetNamespace(), + }, returnedCronJob); err != nil { + assert.NilError(t, err) + } + + t.Run("pgbackrest schedule suspended false", func(t *testing.T) { + assert.Assert(t, !*returnedCronJob.Spec.Suspend) + }) - noscheduletestrepo := v1beta1.PGBackRestRepo{Name: "repo1"} - assert.Assert(t, !backupScheduleFound(noscheduletestrepo, "full")) + t.Run("shutdown", func(t *testing.T) { + *postgresCluster.Spec.Shutdown = true + postgresCluster.Spec.Standby = nil + requeue := r.reconcileScheduledBackups(ctx, + postgresCluster, serviceAccount, fakeObservedCronJobs()) + assert.Assert(t, !requeue) + + assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name + "-repo1-full", + Namespace: postgresCluster.GetNamespace(), + }, returnedCronJob)) + + assert.Assert(t, *returnedCronJob.Spec.Suspend) + }) + + t.Run("standby", func(t *testing.T) { + *postgresCluster.Spec.Shutdown = false + postgresCluster.Spec.Standby = &v1beta1.PostgresStandbySpec{ + Enabled: true, + } + + requeue := r.reconcileScheduledBackups(ctx, + postgresCluster, serviceAccount, fakeObservedCronJobs()) + assert.Assert(t, !requeue) + + assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name + "-repo1-full", + Namespace: postgresCluster.GetNamespace(), + }, returnedCronJob)) + + assert.Assert(t, *returnedCronJob.Spec.Suspend) + }) + }) }) - t.Run("pgbackrest schedule suspended status", func(t *testing.T) { + t.Run("run reconcile with backups not defined", func(t *testing.T) { + clusterName := "hippocluster2" + clusterUID := "hippouid2" + + ns := setupNamespace(t, tClient) + // create a PostgresCluster without backups to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + postgresCluster.Spec.Backups = v1beta1.Backups{} - returnedCronJob := &batchv1.CronJob{} - if err := tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-repo1-full", - Namespace: postgresCluster.GetNamespace(), - }, returnedCronJob); err != nil { - assert.NilError(t, err) + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, } - t.Run("pgbackrest schedule suspended false", func(t *testing.T) { - assert.Assert(t, !*returnedCronJob.Spec.Suspend) - }) + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) - t.Run("shutdown", func(t *testing.T) { - *postgresCluster.Spec.Shutdown = true - postgresCluster.Spec.Standby = nil + result, err := r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, false) + if err != nil { + t.Errorf("unable to reconcile pgBackRest: %v", err) + } + assert.Equal(t, result, reconcile.Result{}) - requeue := r.reconcileScheduledBackups(ctx, - postgresCluster, serviceAccount, fakeObservedCronJobs()) - assert.Assert(t, !requeue) + 
t.Run("verify pgbackrest dedicated repo StatefulSet", func(t *testing.T) { - assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-repo1-full", - Namespace: postgresCluster.GetNamespace(), - }, returnedCronJob)) + // Verify the sts doesn't exist + dedicatedRepos := &appsv1.StatefulSetList{} + if err := tClient.List(ctx, dedicatedRepos, client.InNamespace(ns.Name), + client.MatchingLabels{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestDedicated: "", + }); err != nil { + t.Fatal(err) + } - assert.Assert(t, *returnedCronJob.Spec.Suspend) + assert.Equal(t, len(dedicatedRepos.Items), 0) }) - t.Run("standby", func(t *testing.T) { - *postgresCluster.Spec.Shutdown = false - postgresCluster.Spec.Standby = &v1beta1.PostgresStandbySpec{ - Enabled: true, + t.Run("verify pgbackrest repo volumes", func(t *testing.T) { + + // get the pgBackRest repo sts using the labels we expect it to have + repoVols := &corev1.PersistentVolumeClaimList{} + if err := tClient.List(ctx, repoVols, client.InNamespace(ns.Name), + client.MatchingLabels{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestRepoVolume: "", + }); err != nil { + t.Fatal(err) } - requeue := r.reconcileScheduledBackups(ctx, - postgresCluster, serviceAccount, fakeObservedCronJobs()) - assert.Assert(t, !requeue) + assert.Equal(t, len(repoVols.Items), 0) + }) - assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-repo1-full", - Namespace: postgresCluster.GetNamespace(), - }, returnedCronJob)) + t.Run("verify pgbackrest configuration", func(t *testing.T) { - assert.Assert(t, *returnedCronJob.Spec.Suspend) + config := &corev1.ConfigMap{} + err := tClient.Get(ctx, types.NamespacedName{ + Name: naming.PGBackRestConfig(postgresCluster).Name, + Namespace: postgresCluster.GetNamespace(), + }, config) + assert.Equal(t, apierrors.IsNotFound(err), true) }) }) } @@ -673,11 +728,11 @@ func TestReconcilePGBackRestRBAC(t *testing.T) { } func TestReconcileStanzaCreate(t *testing.T) { - tEnv, tClient := setupKubernetes(t) + cfg, tClient := setupKubernetes(t) require.ParallelCapacity(t, 0) r := &Reconciler{} - ctx, cancel := setupManager(t, tEnv.Config, func(mgr manager.Manager) { + ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(ControllerName), @@ -709,13 +764,13 @@ func TestReconcileStanzaCreate(t *testing.T) { }, }}) - stanzaCreateFail := func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + stanzaCreateFail := func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { return errors.New("fake stanza create failed") } - stanzaCreateSuccess := func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + stanzaCreateSuccess := func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { return nil } @@ -729,27 +784,22 @@ func TestReconcileStanzaCreate(t *testing.T) { Message: "pgBackRest dedicated repository host is ready", }) - configHashMistmatch, err := r.reconcileStanzaCreate(ctx, postgresCluster, instances, "abcde12345") - assert.NilError(t, err) - assert.Assert(t, !configHashMistmatch) - - events := &corev1.EventList{} - err = wait.Poll(time.Second/2, 
Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": ns.Name, - "involvedObject.uid": clusterUID, - "reason": "StanzasCreated", - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }) + configHashMismatch, err := r.reconcileStanzaCreate(ctx, postgresCluster, instances, "abcde12345") assert.NilError(t, err) + assert.Assert(t, !configHashMismatch) + + assert.NilError(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.Name, + "involvedObject.uid": clusterUID, + "reason": "StanzasCreated", + }) + return len(events.Items) == 1, err + })) // status should indicate stanzas were created for _, r := range postgresCluster.Status.PGBackRest.Repos { @@ -773,27 +823,22 @@ func TestReconcileStanzaCreate(t *testing.T) { SystemIdentifier: "6952526174828511264", } - configHashMismatch, err := r.reconcileStanzaCreate(ctx, postgresCluster, instances, "abcde12345") + configHashMismatch, err = r.reconcileStanzaCreate(ctx, postgresCluster, instances, "abcde12345") assert.Error(t, err, "fake stanza create failed: ") assert.Assert(t, !configHashMismatch) - events = &corev1.EventList{} - err = wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": ns.Name, - "involvedObject.uid": clusterUID, - "reason": "UnableToCreateStanzas", - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }) - assert.NilError(t, err) + assert.NilError(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.Name, + "involvedObject.uid": clusterUID, + "reason": "UnableToCreateStanzas", + }) + return len(events.Items) == 1, err + })) // status should indicate stanza were not created for _, r := range postgresCluster.Status.PGBackRest.Repos { @@ -801,52 +846,6 @@ func TestReconcileStanzaCreate(t *testing.T) { } } -func TestGetPGBackRestExecSelector(t *testing.T) { - - testCases := []struct { - cluster *v1beta1.PostgresCluster - repo v1beta1.PGBackRestRepo - desc string - expectedSelector string - expectedContainer string - }{{ - desc: "volume repo defined dedicated repo host enabled", - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "hippo"}, - }, - repo: v1beta1.PGBackRestRepo{ - Name: "repo1", - Volume: &v1beta1.RepoPVC{}, - }, - expectedSelector: "postgres-operator.crunchydata.com/cluster=hippo," + - "postgres-operator.crunchydata.com/pgbackrest=," + - "postgres-operator.crunchydata.com/pgbackrest-dedicated=", - expectedContainer: "pgbackrest", - }, { - desc: "cloud repo defined no repo host enabled", - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "hippo"}, - }, - 
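The polling rewrites above move from the deprecated wait.Poll to the context-aware wait.PollUntilContextTimeout in k8s.io/apimachinery. A minimal, runnable sketch of the new call shape, using the same half-second interval and two-second timeout as the tests; the condition body here is only a stand-in for the EventList checks:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()
	deadline := time.Now().Add(time.Second)

	// Passing immediate=false matches the old wait.Poll behavior of
	// sleeping one interval before the first check. The condition now
	// takes a context and returns (done, err); a non-nil err stops
	// polling early, which is why the tests can return the List error
	// directly alongside the length check.
	err := wait.PollUntilContextTimeout(ctx, time.Second/2, 2*time.Second, false,
		func(ctx context.Context) (bool, error) {
			return time.Now().After(deadline), nil
		})
	fmt.Println("poll finished:", err) // nil once the condition reports done
}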
repo: v1beta1.PGBackRestRepo{ - Name: "repo1", - S3: &v1beta1.RepoS3{}, - }, - expectedSelector: "postgres-operator.crunchydata.com/cluster=hippo," + - "postgres-operator.crunchydata.com/instance," + - "postgres-operator.crunchydata.com/role=master", - expectedContainer: "database", - }} - - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - selector, container, err := getPGBackRestExecSelector(tc.cluster, tc.repo) - assert.NilError(t, err) - assert.Assert(t, selector.String() == tc.expectedSelector) - assert.Assert(t, container == tc.expectedContainer) - }) - } -} - func TestReconcileReplicaCreateBackup(t *testing.T) { // Garbage collector cleans up test resources before the test completes if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { @@ -931,17 +930,13 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { } assert.Assert(t, foundOwnershipRef) - var foundConfigAnnotation, foundHashAnnotation bool + var foundHashAnnotation bool // verify annotations for k, v := range backupJob.GetAnnotations() { - if k == naming.PGBackRestCurrentConfig && v == naming.PGBackRestRepoContainerName { - foundConfigAnnotation = true - } if k == naming.PGBackRestConfigHash && v == configHash { foundHashAnnotation = true } } - assert.Assert(t, foundConfigAnnotation) assert.Assert(t, foundHashAnnotation) // verify container & env vars @@ -1012,11 +1007,11 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { } func TestReconcileManualBackup(t *testing.T) { - tEnv, tClient := setupKubernetes(t) + cfg, tClient := setupKubernetes(t) require.ParallelCapacity(t, 2) r := &Reconciler{} - _, cancel := setupManager(t, tEnv.Config, func(mgr manager.Manager) { + _, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(ControllerName), @@ -1427,23 +1422,18 @@ func TestReconcileManualBackup(t *testing.T) { // if an event is expected, the check for it if tc.expectedEventReason != "" { - events := &corev1.EventList{} - err = wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": ns.GetName(), - "involvedObject.uid": string(postgresCluster.GetUID()), - "reason": tc.expectedEventReason, - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }) - assert.NilError(t, err) + assert.NilError(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.GetName(), + "involvedObject.uid": string(postgresCluster.GetUID()), + "reason": tc.expectedEventReason, + }) + return len(events.Items) == 1, err + })) } return } @@ -1562,7 +1552,7 @@ func TestGetPGBackRestResources(t *testing.T) { }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -1601,7 +1591,7 @@ func TestGetPGBackRestResources(t *testing.T) { }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: 
[]corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -1668,11 +1658,11 @@ func TestGetPGBackRestResources(t *testing.T) { jobCount: 0, pvcCount: 0, hostCount: 1, }, }, { - desc: "no dedicated repo host defined delete dedicated sts", + desc: "no dedicated repo host defined, dedicated sts not deleted", createResources: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "delete-dedicated", + Name: "keep-dedicated-two", Namespace: namespace, Labels: naming.PGBackRestDedicatedLabels(clusterName), }, @@ -1701,43 +1691,8 @@ func TestGetPGBackRestResources(t *testing.T) { }, }, result: testResult{ - jobCount: 0, pvcCount: 0, hostCount: 0, - }, - }, { - desc: "no repo host defined delete dedicated sts", - createResources: []client.Object{ - &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "delete-dedicated-no-repo-host", - Namespace: namespace, - Labels: naming.PGBackRestDedicatedLabels(clusterName), - }, - Spec: appsv1.StatefulSetSpec{ - Selector: metav1.SetAsLabelSelector( - naming.PGBackRestDedicatedLabels(clusterName)), - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: naming.PGBackRestDedicatedLabels(clusterName), - }, - Spec: corev1.PodSpec{}, - }, - }, - }, - }, - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - Namespace: namespace, - UID: types.UID(clusterUID), - }, - Spec: v1beta1.PostgresClusterSpec{ - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{}, - }, - }, - }, - result: testResult{ - jobCount: 0, pvcCount: 0, hostCount: 0, + // Host count is 2 due to previous repo host sts not being deleted. 
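The Resources fields in these hunks change type because newer Kubernetes APIs (k8s.io/api v0.29 and later) describe PVC resources with corev1.VolumeResourceRequirements, which carries only Limits and Requests and drops the Claims field that never applied to volume claims. A small sketch of a claim spec on the new type; the storage size is illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Same shape as the test fixtures above, on the narrower type.
	spec := corev1.PersistentVolumeClaimSpec{
		AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
		Resources: corev1.VolumeResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("1Gi"),
			},
		},
	}
	fmt.Println(spec.Resources.Requests.Storage()) // 1Gi
}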
+ jobCount: 0, pvcCount: 0, hostCount: 2, }, }} @@ -1750,7 +1705,7 @@ func TestGetPGBackRestResources(t *testing.T) { assert.NilError(t, err) assert.NilError(t, tClient.Create(ctx, resource)) - resources, err := r.getPGBackRestResources(ctx, tc.cluster) + resources, err := r.getPGBackRestResources(ctx, tc.cluster, true) assert.NilError(t, err) assert.Assert(t, tc.result.jobCount == len(resources.replicaCreateBackupJobs)) @@ -1762,11 +1717,11 @@ func TestGetPGBackRestResources(t *testing.T) { } func TestReconcilePostgresClusterDataSource(t *testing.T) { - tEnv, tClient := setupKubernetes(t) + cfg, tClient := setupKubernetes(t) require.ParallelCapacity(t, 4) r := &Reconciler{} - ctx, cancel := setupManager(t, tEnv.Config, func(mgr manager.Manager) { + ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ Client: tClient, Recorder: mgr.GetEventRecorderFor(ControllerName), @@ -1987,7 +1942,7 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { pgclusterDataSource = tc.dataSource.PostgresCluster } err := r.reconcilePostgresClusterDataSource(ctx, cluster, pgclusterDataSource, - "testhash", nil, rootCA) + "testhash", nil, rootCA, true) assert.NilError(t, err) restoreConfig := &corev1.ConfigMap{} @@ -2038,23 +1993,17 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { if tc.result.invalidSourceCluster || tc.result.invalidSourceRepo || tc.result.invalidOptions { - events := &corev1.EventList{} - if err := wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": namespace, - "reason": "InvalidDataSource", - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }); err != nil { - t.Error(err) - } + assert.Check(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": namespace, + "reason": "InvalidDataSource", + }) + return len(events.Items) == 1, err + })) } }) } @@ -2062,11 +2011,11 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { } func TestReconcileCloudBasedDataSource(t *testing.T) { - tEnv, tClient := setupKubernetes(t) + cfg, tClient := setupKubernetes(t) require.ParallelCapacity(t, 4) r := &Reconciler{} - ctx, cancel := setupManager(t, tEnv.Config, func(mgr manager.Manager) { + ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ Client: tClient, Recorder: mgr.GetEventRecorderFor(ControllerName), @@ -2102,7 +2051,7 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { result: testResult{ configCount: 1, jobCount: 1, pvcCount: 1, expectedClusterCondition: nil, - conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", + conf: "|\n # Generated by postgres-operator. 
DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n archive-async = y\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n spool-path = /pgdata/pgbackrest-spool\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", }, }, { desc: "global/configuration set", @@ -2119,7 +2068,7 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { result: testResult{ configCount: 1, jobCount: 1, pvcCount: 1, expectedClusterCondition: nil, - conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n log-path = /pgdata/pgbackrest/log\n repo1-path = elephant\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", + conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n archive-async = y\n log-path = /pgdata/pgbackrest/log\n repo1-path = elephant\n spool-path = /pgdata/pgbackrest-spool\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", }, }, { desc: "invalid option: stanza", @@ -2134,7 +2083,7 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { result: testResult{ configCount: 1, jobCount: 0, pvcCount: 1, expectedClusterCondition: nil, - conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", + conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n archive-async = y\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n spool-path = /pgdata/pgbackrest-spool\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", }, }, { desc: "cluster bootstrapped init condition missing", @@ -2153,7 +2102,7 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { Reason: "ClusterAlreadyBootstrapped", Message: "The cluster is already bootstrapped", }, - conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", + conf: "|\n # Generated by postgres-operator. 
DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n archive-async = y\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n spool-path = /pgdata/pgbackrest-spool\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", }, }} @@ -2198,7 +2147,7 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %#v", err) } else { assert.NilError(t, err) - assert.Assert(t, marshalMatches(restoreConfig.Data["pgbackrest_instance.conf"], tc.result.conf)) + assert.Assert(t, cmp.MarshalMatches(restoreConfig.Data["pgbackrest_instance.conf"], tc.result.conf)) } restoreJobs := &batchv1.JobList{} @@ -2284,7 +2233,7 @@ func TestCopyConfigurationResources(t *testing.T) { Name: "instance1", DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -2336,7 +2285,7 @@ func TestCopyConfigurationResources(t *testing.T) { Name: "instance1", DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -2489,14 +2438,14 @@ func TestCopyConfigurationResources(t *testing.T) { } func TestGenerateBackupJobIntent(t *testing.T) { + ctx := context.Background() t.Run("empty", func(t *testing.T) { - spec, err := generateBackupJobSpecIntent( + spec := generateBackupJobSpecIntent(ctx, &v1beta1.PostgresCluster{}, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) - assert.Assert(t, marshalMatches(spec.Template.Spec, ` + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` containers: - command: - /opt/crunchy/bin/pgbackrest @@ -2508,10 +2457,10 @@ containers: - name: COMPARE_HASH value: "true" - name: CONTAINER - value: database + value: pgbackrest - name: NAMESPACE - name: SELECTOR - value: postgres-operator.crunchydata.com/cluster=,postgres-operator.crunchydata.com/instance,postgres-operator.crunchydata.com/role=master + value: postgres-operator.crunchydata.com/cluster=,postgres-operator.crunchydata.com/pgbackrest=,postgres-operator.crunchydata.com/pgbackrest-dedicated= name: pgbackrest resources: {} securityContext: @@ -2522,6 +2471,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbackrest/conf.d name: pgbackrest-config @@ -2536,11 +2487,23 @@ volumes: sources: - configMap: items: - - key: pgbackrest_instance.conf - path: pgbackrest_instance.conf + - key: pgbackrest_repo.conf + path: pgbackrest_repo.conf - key: config-hash path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf name: -pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: -pgbackrest `)) }) @@ -2550,12 +2513,11 @@ volumes: ImagePullPolicy: corev1.PullAlways, }, } - job, err := generateBackupJobSpecIntent( + job := generateBackupJobSpecIntent(ctx, cluster, 
v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Equal(t, job.Template.Spec.Containers[0].ImagePullPolicy, corev1.PullAlways) }) @@ -2566,12 +2528,11 @@ volumes: cluster.Spec.Backups = v1beta1.Backups{ PGBackRest: v1beta1.PGBackRestArchive{}, } - job, err := generateBackupJobSpecIntent( + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.DeepEqual(t, job.Template.Spec.Containers[0].Resources, corev1.ResourceRequirements{}) }) @@ -2584,12 +2545,11 @@ volumes: }, }, } - job, err := generateBackupJobSpecIntent( + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.DeepEqual(t, job.Template.Spec.Containers[0].Resources, corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -2624,12 +2584,11 @@ volumes: }, }, } - job, err := generateBackupJobSpecIntent( + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Equal(t, job.Template.Spec.Affinity, affinity) }) @@ -2638,12 +2597,11 @@ volumes: cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ PriorityClassName: initialize.String("some-priority-class"), } - job, err := generateBackupJobSpecIntent( + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Equal(t, job.Template.Spec.PriorityClassName, "some-priority-class") }) @@ -2657,12 +2615,11 @@ volumes: cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ Tolerations: tolerations, } - job, err := generateBackupJobSpecIntent( + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.DeepEqual(t, job.Template.Spec.Tolerations, tolerations) }) @@ -2672,18 +2629,16 @@ volumes: t.Run("Undefined", func(t *testing.T) { cluster.Spec.Backups.PGBackRest.Jobs = nil - spec, err := generateBackupJobSpecIntent( + spec := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Assert(t, spec.TTLSecondsAfterFinished == nil) cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{} - spec, err = generateBackupJobSpecIntent( + spec = generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Assert(t, spec.TTLSecondsAfterFinished == nil) }) @@ -2692,10 +2647,9 @@ volumes: TTLSecondsAfterFinished: initialize.Int32(0), } - spec, err := generateBackupJobSpecIntent( + spec := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) if assert.Check(t, spec.TTLSecondsAfterFinished != nil) { assert.Equal(t, *spec.TTLSecondsAfterFinished, int32(0)) } @@ -2706,10 +2660,9 @@ volumes: TTLSecondsAfterFinished: initialize.Int32(100), } - spec, err := generateBackupJobSpecIntent( + spec := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) if assert.Check(t, spec.TTLSecondsAfterFinished != nil) { assert.Equal(t, *spec.TTLSecondsAfterFinished, int32(100)) } @@ -2721,16 +2674,17 @@ func TestGenerateRepoHostIntent(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 0) + ctx := context.Background() r := Reconciler{Client: cc} t.Run("empty", func(t *testing.T) { - _, err := r.generateRepoHostIntent(&v1beta1.PostgresCluster{}, "", &RepoResources{}, + _, err := 
r.generateRepoHostIntent(ctx, &v1beta1.PostgresCluster{}, "", &RepoResources{}, &observedInstances{}) assert.NilError(t, err) }) cluster := &v1beta1.PostgresCluster{} - sts, err := r.generateRepoHostIntent(cluster, "", &RepoResources{}, &observedInstances{}) + sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, &observedInstances{}) assert.NilError(t, err) t.Run("ServiceAccount", func(t *testing.T) { @@ -2751,7 +2705,7 @@ func TestGenerateRepoHostIntent(t *testing.T) { }, } observed := &observedInstances{forCluster: []*Instance{{Pods: []*corev1.Pod{{}}}}} - sts, err := r.generateRepoHostIntent(cluster, "", &RepoResources{}, observed) + sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, observed) assert.NilError(t, err) assert.Equal(t, *sts.Spec.Replicas, int32(1)) }) @@ -2763,7 +2717,7 @@ func TestGenerateRepoHostIntent(t *testing.T) { }, } observed := &observedInstances{forCluster: []*Instance{{}}} - sts, err := r.generateRepoHostIntent(cluster, "", &RepoResources{}, observed) + sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, observed) assert.NilError(t, err) assert.Equal(t, *sts.Spec.Replicas, int32(0)) }) @@ -3432,11 +3386,11 @@ func TestPrepareForRestore(t *testing.T) { } func TestReconcileScheduledBackups(t *testing.T) { - tEnv, tClient := setupKubernetes(t) + cfg, tClient := setupKubernetes(t) require.ParallelCapacity(t, 2) r := &Reconciler{} - _, cancel := setupManager(t, tEnv.Config, func(mgr manager.Manager) { + _, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(ControllerName), @@ -3628,23 +3582,18 @@ func TestReconcileScheduledBackups(t *testing.T) { // if an event is expected, the check for it if tc.expectedEventReason != "" { - events := &corev1.EventList{} - err := wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": ns.GetName(), - "involvedObject.uid": string(postgresCluster.GetUID()), - "reason": tc.expectedEventReason, - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }) - assert.NilError(t, err) + assert.NilError(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.GetName(), + "involvedObject.uid": string(postgresCluster.GetUID()), + "reason": tc.expectedEventReason, + }) + return len(events.Items) == 1, err + })) } } else if !tc.expectReconcile && tc.expectRequeue { // expect requeue, no reconcile @@ -3787,3 +3736,167 @@ func TestSetScheduledJobStatus(t *testing.T) { assert.Assert(t, len(postgresCluster.Status.PGBackRest.ScheduledBackups) == 0) }) } + +func TestBackupsEnabled(t *testing.T) { + // Garbage collector cleans up test resources before the test completes + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("USE_EXISTING_CLUSTER: Test fails due to garbage collection") + } + + cfg, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 2) + + r := &Reconciler{} + ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { + r = &Reconciler{ + Client: 
mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor(ControllerName), + Tracer: otel.Tracer(ControllerName), + Owner: ControllerName, + } + }) + t.Cleanup(func() { teardownManager(cancel, t) }) + + t.Run("Cluster with backups, no sts can be reconciled", func(t *testing.T) { + clusterName := "hippocluster1" + clusterUID := "hippouid1" + + ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, backupsSpecFound) + assert.Assert(t, backupsReconciliationAllowed) + }) + + t.Run("Cluster with backups, sts can be reconciled", func(t *testing.T) { + clusterName := "hippocluster2" + clusterUID := "hippouid2" + + ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, + } + + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) + + _, err = r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, true) + assert.NilError(t, err) + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, backupsSpecFound) + assert.Assert(t, backupsReconciliationAllowed) + }) + + t.Run("Cluster with no backups, no sts can reconcile", func(t *testing.T) { + // create a PostgresCluster to test with + clusterName := "hippocluster3" + clusterUID := "hippouid3" + + ns := setupNamespace(t, tClient) + + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + postgresCluster.Spec.Backups = v1beta1.Backups{} + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, !backupsSpecFound) + assert.Assert(t, backupsReconciliationAllowed) + }) + + t.Run("Cluster with no backups, sts cannot be reconciled", func(t *testing.T) { + clusterName := "hippocluster4" + clusterUID := "hippouid4" + + ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, + } + + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) + + _, err = r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, true) + assert.NilError(t, err) + + postgresCluster.Spec.Backups = v1beta1.Backups{} + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, !backupsSpecFound) + assert.Assert(t, !backupsReconciliationAllowed) + }) + + t.Run("Cluster with no backups, sts, annotation can 
be reconciled", func(t *testing.T) { + clusterName := "hippocluster5" + clusterUID := "hippouid5" + + ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, + } + + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) + + _, err = r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, true) + assert.NilError(t, err) + + postgresCluster.Spec.Backups = v1beta1.Backups{} + annotations := map[string]string{ + naming.AuthorizeBackupRemovalAnnotation: "true", + } + postgresCluster.Annotations = annotations + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, !backupsSpecFound) + assert.Assert(t, backupsReconciliationAllowed) + }) +} diff --git a/internal/controller/postgrescluster/pgbouncer.go b/internal/controller/postgrescluster/pgbouncer.go index a85aa524eb..76207fac02 100644 --- a/internal/controller/postgrescluster/pgbouncer.go +++ b/internal/controller/postgrescluster/pgbouncer.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -181,8 +170,8 @@ func (r *Reconciler) reconcilePGBouncerInPostgreSQL( if err == nil { ctx := logging.NewContext(ctx, logging.FromContext(ctx).WithValues("revision", revision)) - err = action(ctx, func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + err = action(ctx, func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { + return r.PodExec(ctx, pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) }) } if err == nil { @@ -315,6 +304,8 @@ func (r *Reconciler) generatePGBouncerService( } servicePort.NodePort = *spec.NodePort } + service.Spec.ExternalTrafficPolicy = initialize.FromPointer(spec.ExternalTrafficPolicy) + service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy } service.Spec.Ports = []corev1.ServicePort{servicePort} @@ -351,7 +342,7 @@ func (r *Reconciler) reconcilePGBouncerService( // generatePGBouncerDeployment returns an appsv1.Deployment that runs PgBouncer pods. 
func (r *Reconciler) generatePGBouncerDeployment( - cluster *v1beta1.PostgresCluster, + ctx context.Context, cluster *v1beta1.PostgresCluster, primaryCertificate *corev1.SecretProjection, configmap *corev1.ConfigMap, secret *corev1.Secret, ) (*appsv1.Deployment, bool, error) { @@ -404,25 +395,20 @@ func (r *Reconciler) generatePGBouncerDeployment( // - https://docs.k8s.io/concepts/workloads/controllers/deployment/#rolling-update-deployment deploy.Spec.Strategy.Type = appsv1.RollingUpdateDeploymentStrategyType deploy.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{ - MaxUnavailable: intstr.ValueOrDefault(nil, intstr.FromInt(0)), + MaxUnavailable: initialize.Pointer(intstr.FromInt32(0)), } // Use scheduling constraints from the cluster spec. deploy.Spec.Template.Spec.Affinity = cluster.Spec.Proxy.PGBouncer.Affinity deploy.Spec.Template.Spec.Tolerations = cluster.Spec.Proxy.PGBouncer.Tolerations - - if cluster.Spec.Proxy.PGBouncer.PriorityClassName != nil { - deploy.Spec.Template.Spec.PriorityClassName = *cluster.Spec.Proxy.PGBouncer.PriorityClassName - } - + deploy.Spec.Template.Spec.PriorityClassName = + initialize.FromPointer(cluster.Spec.Proxy.PGBouncer.PriorityClassName) deploy.Spec.Template.Spec.TopologySpreadConstraints = cluster.Spec.Proxy.PGBouncer.TopologySpreadConstraints // if default pod scheduling is not explicitly disabled, add the default // pod topology spread constraints - if cluster.Spec.DisableDefaultPodScheduling == nil || - (cluster.Spec.DisableDefaultPodScheduling != nil && - !*cluster.Spec.DisableDefaultPodScheduling) { + if !initialize.FromPointer(cluster.Spec.DisableDefaultPodScheduling) { deploy.Spec.Template.Spec.TopologySpreadConstraints = append( deploy.Spec.Template.Spec.TopologySpreadConstraints, defaultTopologySpreadConstraints(*deploy.Spec.Selector)...) @@ -455,7 +441,7 @@ func (r *Reconciler) generatePGBouncerDeployment( err := errors.WithStack(r.setControllerReference(cluster, deploy)) if err == nil { - pgbouncer.Pod(cluster, configmap, primaryCertificate, secret, &deploy.Spec.Template.Spec) + pgbouncer.Pod(ctx, cluster, configmap, primaryCertificate, secret, &deploy.Spec.Template.Spec) } return deploy, true, err @@ -471,7 +457,7 @@ func (r *Reconciler) reconcilePGBouncerDeployment( configmap *corev1.ConfigMap, secret *corev1.Secret, ) error { deploy, specified, err := r.generatePGBouncerDeployment( - cluster, primaryCertificate, configmap, secret) + ctx, cluster, primaryCertificate, configmap, secret) // Set observations whether the deployment exists or not. defer func() { diff --git a/internal/controller/postgrescluster/pgbouncer_test.go b/internal/controller/postgrescluster/pgbouncer_test.go index e8f46908fe..9bbced5247 100644 --- a/internal/controller/postgrescluster/pgbouncer_test.go +++ b/internal/controller/postgrescluster/pgbouncer_test.go @@ -1,20 +1,6 @@ -//go:build envtest -// +build envtest - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -29,11 +15,13 @@ import ( policyv1 "k8s.io/api/policy/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -62,7 +50,7 @@ func TestGeneratePGBouncerService(t *testing.T) { assert.NilError(t, err) assert.Assert(t, !specified) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null name: pg7-pgbouncer namespace: ns5 @@ -77,11 +65,11 @@ namespace: ns5 } alwaysExpect := func(t testing.TB, service *corev1.Service) { - assert.Assert(t, marshalMatches(service.TypeMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: pg7 @@ -175,7 +163,7 @@ ownerReferences: alwaysExpect(t, service) // Defaults to ClusterIP. assert.Equal(t, service.Spec.Type, corev1.ServiceTypeClusterIP) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgbouncer port: 9651 protocol: TCP @@ -208,7 +196,7 @@ ownerReferences: assert.Assert(t, specified) alwaysExpect(t, service) test.Expect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgbouncer port: 9651 protocol: TCP @@ -233,7 +221,7 @@ ownerReferences: assert.NilError(t, err) assert.Equal(t, service.Spec.Type, corev1.ServiceTypeNodePort) alwaysExpect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgbouncer nodePort: 32001 port: 9651 @@ -246,7 +234,7 @@ ownerReferences: assert.NilError(t, err) assert.Equal(t, service.Spec.Type, corev1.ServiceTypeLoadBalancer) alwaysExpect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgbouncer nodePort: 32002 port: 9651 @@ -380,6 +368,7 @@ func TestGeneratePGBouncerDeployment(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 0) + ctx := context.Background() reconciler := &Reconciler{Client: cc} cluster := &v1beta1.PostgresCluster{} @@ -393,11 +382,11 @@ func TestGeneratePGBouncerDeployment(t *testing.T) { cluster := cluster.DeepCopy() cluster.Spec.Proxy = spec - deploy, specified, err := reconciler.generatePGBouncerDeployment(cluster, nil, nil, nil) + deploy, specified, err := reconciler.generatePGBouncerDeployment(ctx, cluster, nil, nil, nil) assert.NilError(t, err) assert.Assert(t, !specified) - assert.Assert(t, marshalMatches(deploy.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(deploy.ObjectMeta, ` creationTimestamp: null name: test-cluster-pgbouncer namespace: ns3 @@ -426,7 +415,7 @@ namespace: ns3 } deploy, specified, err := 
reconciler.generatePGBouncerDeployment( - cluster, primary, configmap, secret) + ctx, cluster, primary, configmap, secret) assert.NilError(t, err) assert.Assert(t, specified) @@ -466,7 +455,7 @@ namespace: ns3 t.Run("PodSpec", func(t *testing.T) { deploy, specified, err := reconciler.generatePGBouncerDeployment( - cluster, primary, configmap, secret) + ctx, cluster, primary, configmap, secret) assert.NilError(t, err) assert.Assert(t, specified) @@ -482,7 +471,7 @@ namespace: ns3 // topology spread constraints and spec.disableDefaultPodScheduling being // set to true (as done in instance StatefulSet tests). - assert.Assert(t, marshalMatches(deploy.Spec.Template.Spec, ` + assert.Assert(t, cmp.MarshalMatches(deploy.Spec.Template.Spec, ` automountServiceAccountToken: false containers: null enableServiceLinks: false @@ -512,7 +501,7 @@ topologySpreadConstraints: cluster.Spec.DisableDefaultPodScheduling = initialize.Bool(true) deploy, specified, err := reconciler.generatePGBouncerDeployment( - cluster, primary, configmap, secret) + ctx, cluster, primary, configmap, secret) assert.NilError(t, err) assert.Assert(t, specified) @@ -563,7 +552,7 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringInt32(0) + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromInt32(0)) assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) assert.Assert(t, !foundPDB(cluster)) }) @@ -572,7 +561,7 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringInt32(1) + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) @@ -581,7 +570,7 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { assert.Assert(t, foundPDB(cluster)) t.Run("deleted", func(t *testing.T) { - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringInt32(0) + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromInt32(0)) err := r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) if apierrors.IsConflict(err) { // When running in an existing environment another controller will sometimes update @@ -599,7 +588,7 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringString("50%") + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("50%")) assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) @@ -608,7 +597,7 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { assert.Assert(t, foundPDB(cluster)) t.Run("deleted", func(t *testing.T) { - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringString("0%") + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("0%")) err := r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) if apierrors.IsConflict(err) { // When running in an existing environment another controller will sometimes 
update @@ -622,13 +611,13 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { }) t.Run("delete with 00%", func(t *testing.T) { - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringString("50%") + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("50%")) assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) assert.Assert(t, foundPDB(cluster)) t.Run("deleted", func(t *testing.T) { - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringString("00%") + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("00%")) err := r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) if apierrors.IsConflict(err) { // When running in an existing environment another controller will sometimes update diff --git a/internal/controller/postgrescluster/pgmonitor.go b/internal/controller/postgrescluster/pgmonitor.go index 1ada858aa7..e1b5186cb4 100644 --- a/internal/controller/postgrescluster/pgmonitor.go +++ b/internal/controller/postgrescluster/pgmonitor.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -27,6 +16,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" @@ -54,7 +44,7 @@ func (r *Reconciler) reconcilePGMonitor(ctx context.Context, // Status.Monitoring.ExporterConfiguration is used to determine when the // pgMonitor postgres_exporter configuration should be added/changed to // limit how often PodExec is used -// - TODO jmckulk: kube perms comment? +// - TODO (jmckulk): kube perms comment? func (r *Reconciler) reconcilePGMonitorExporter(ctx context.Context, cluster *v1beta1.PostgresCluster, instances *observedInstances, monitoringSecret *corev1.Secret) error { @@ -144,9 +134,9 @@ func (r *Reconciler) reconcilePGMonitorExporter(ctx context.Context, // Apply the necessary SQL and record its hash in cluster.Status if err == nil { - err = action(ctx, func(_ context.Context, stdin io.Reader, + err = action(ctx, func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(writablePod.Namespace, writablePod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + return r.PodExec(ctx, writablePod.Namespace, writablePod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) 
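The exec closures in these files now accept a context and forward it to PodExec, so cancellation and deadlines reach the remote exec call instead of being dropped at the closure boundary. A sketch of the pattern with an illustrative function type; the operator's real signatures live in its internal packages:

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
)

// podExecutor mirrors the updated shape: context first, then the pod
// coordinates, streams, and command. Names here are illustrative.
type podExecutor func(ctx context.Context, namespace, pod, container string,
	stdin io.Reader, stdout, stderr io.Writer, command ...string) error

func main() {
	// A fake executor in the spirit of stanzaCreateSuccess above.
	exec := podExecutor(func(ctx context.Context, namespace, pod, container string,
		stdin io.Reader, stdout, stderr io.Writer, command ...string) error {
		fmt.Fprintln(stdout, "ran", command, "in", namespace+"/"+pod)
		return ctx.Err() // a canceled context now fails the call
	})

	var out bytes.Buffer
	_ = exec(context.Background(), "ns", "pod-0", "database",
		nil, &out, io.Discard, "pgbackrest", "stanza-create")
	fmt.Print(out.String())
}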
}) } if err == nil { @@ -240,21 +230,23 @@ func (r *Reconciler) reconcileMonitoringSecret( // addPGMonitorToInstancePodSpec performs the necessary setup to add // pgMonitor resources on a PodTemplateSpec func addPGMonitorToInstancePodSpec( + ctx context.Context, cluster *v1beta1.PostgresCluster, template *corev1.PodTemplateSpec, exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap) error { - err := addPGMonitorExporterToInstancePodSpec(cluster, template, exporterQueriesConfig, exporterWebConfig) + err := addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, exporterWebConfig) return err } // addPGMonitorExporterToInstancePodSpec performs the necessary setup to // add pgMonitor exporter resources to a PodTemplateSpec -// TODO jmckulk: refactor to pass around monitoring secret; Without the secret +// TODO (jmckulk): refactor to pass around monitoring secret; Without the secret // the exporter container cannot be created; Testing relies on ensuring the // monitoring secret is available func addPGMonitorExporterToInstancePodSpec( + ctx context.Context, cluster *v1beta1.PostgresCluster, template *corev1.PodTemplateSpec, exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap) error { @@ -263,15 +255,38 @@ func addPGMonitorExporterToInstancePodSpec( return nil } + certSecret := cluster.Spec.Monitoring.PGMonitor.Exporter.CustomTLSSecret + withBuiltInCollectors := + !strings.EqualFold(cluster.Annotations[naming.PostgresExporterCollectorsAnnotation], "None") + + var cmd []string + // PG 17 does not include some of the columns found in stat_bgwriter with older PGs. + // Selectively turn off the collector for stat_bgwriter in PG 17, unless the user + // requests all collectors to be turned off. + switch { + case cluster.Spec.PostgresVersion == 17 && withBuiltInCollectors && certSecret == nil: + cmd = pgmonitor.ExporterStartCommand(withBuiltInCollectors, + pgmonitor.ExporterDeactivateStatBGWriterFlag) + case cluster.Spec.PostgresVersion == 17 && withBuiltInCollectors && certSecret != nil: + cmd = pgmonitor.ExporterStartCommand(withBuiltInCollectors, + pgmonitor.ExporterWebConfigFileFlag, + pgmonitor.ExporterDeactivateStatBGWriterFlag) + // If you're turning off all built-in collectors, we don't care which + // version of PG you're using. 
+ case certSecret != nil: + cmd = pgmonitor.ExporterStartCommand(withBuiltInCollectors, + pgmonitor.ExporterWebConfigFileFlag) + default: + cmd = pgmonitor.ExporterStartCommand(withBuiltInCollectors) + } + securityContext := initialize.RestrictedSecurityContext() exporterContainer := corev1.Container{ Name: naming.ContainerPGMonitorExporter, Image: config.PGExporterContainerImage(cluster), ImagePullPolicy: cluster.Spec.ImagePullPolicy, Resources: cluster.Spec.Monitoring.PGMonitor.Exporter.Resources, - Command: pgmonitor.ExporterStartCommand([]string{ - pgmonitor.ExporterExtendQueryPathFlag, pgmonitor.ExporterWebListenAddressFlag, - }), + Command: cmd, Env: []corev1.EnvVar{ {Name: "DATA_SOURCE_URI", Value: fmt.Sprintf("%s:%d/%s", pgmonitor.ExporterHost, *cluster.Spec.Port, pgmonitor.ExporterDB)}, {Name: "DATA_SOURCE_USER", Value: pgmonitor.MonitoringUser}, @@ -295,8 +310,6 @@ func addPGMonitorExporterToInstancePodSpec( }}, } - template.Spec.Containers = append(template.Spec.Containers, exporterContainer) - passwordVolume := corev1.Volume{ Name: "monitoring-secret", VolumeSource: corev1.VolumeSource{ @@ -305,7 +318,6 @@ func addPGMonitorExporterToInstancePodSpec( }, }, } - template.Spec.Volumes = append(template.Spec.Volumes, passwordVolume) // add custom exporter config volume configVolume := corev1.Volume{ @@ -316,7 +328,7 @@ func addPGMonitorExporterToInstancePodSpec( }, }, } - template.Spec.Volumes = append(template.Spec.Volumes, configVolume) + template.Spec.Volumes = append(template.Spec.Volumes, configVolume, passwordVolume) // The original "custom queries" ability allowed users to provide a file with custom queries; // however, it would turn off the default queries. The new "custom queries" ability allows @@ -324,7 +336,7 @@ func addPGMonitorExporterToInstancePodSpec( // Therefore, we only want to add the default queries ConfigMap as a source for the // "exporter-config" volume if the AppendCustomQueries feature gate is turned on OR if the // user has not provided any custom configuration. - if util.DefaultMutableFeatureGate.Enabled(util.AppendCustomQueries) || + if feature.Enabled(ctx, feature.AppendCustomQueries) || cluster.Spec.Monitoring.PGMonitor.Exporter.Configuration == nil { defaultConfigVolumeProjection := corev1.VolumeProjection{ @@ -338,48 +350,13 @@ func addPGMonitorExporterToInstancePodSpec( defaultConfigVolumeProjection) } - if cluster.Spec.Monitoring.PGMonitor.Exporter.CustomTLSSecret != nil { - configureExporterTLS(cluster, template, exporterWebConfig) - } - - // add the proper label to support Pod discovery by Prometheus per pgMonitor configuration - initialize.Labels(template) - template.Labels[naming.LabelPGMonitorDiscovery] = "true" - - return nil -} - -// getExporterCertSecret retrieves the custom tls cert secret projection from the exporter spec -// TODO (jmckulk): One day we might want to generate certs here -func getExporterCertSecret(cluster *v1beta1.PostgresCluster) *corev1.SecretProjection { - if cluster.Spec.Monitoring.PGMonitor.Exporter.CustomTLSSecret != nil { - return cluster.Spec.Monitoring.PGMonitor.Exporter.CustomTLSSecret - } - - return nil -} - -// configureExporterTLS takes a cluster and pod template spec. 
If enabled, the pod template spec -// will be updated with exporter tls configuration -func configureExporterTLS(cluster *v1beta1.PostgresCluster, template *corev1.PodTemplateSpec, exporterWebConfig *corev1.ConfigMap) { - var found bool - var exporterContainer *corev1.Container - for i, container := range template.Spec.Containers { - if container.Name == naming.ContainerPGMonitorExporter { - exporterContainer = &template.Spec.Containers[i] - found = true - } - } - - if found && - pgmonitor.ExporterEnabled(cluster) && - (cluster.Spec.Monitoring.PGMonitor.Exporter.CustomTLSSecret != nil) { + if certSecret != nil { // TODO (jmckulk): params for paths and such certVolume := corev1.Volume{Name: "exporter-certs"} certVolume.Projected = &corev1.ProjectedVolumeSource{ Sources: append([]corev1.VolumeProjection{}, corev1.VolumeProjection{ - Secret: getExporterCertSecret(cluster), + Secret: certSecret, }, ), } @@ -401,10 +378,15 @@ func configureExporterTLS(cluster *v1beta1.PostgresCluster, template *corev1.Pod }} exporterContainer.VolumeMounts = append(exporterContainer.VolumeMounts, mounts...) - exporterContainer.Command = pgmonitor.ExporterStartCommand([]string{ - pgmonitor.ExporterExtendQueryPathFlag, pgmonitor.ExporterWebListenAddressFlag, pgmonitor.ExporterWebConfigFileFlag, - }) } + + template.Spec.Containers = append(template.Spec.Containers, exporterContainer) + + // add the proper label to support Pod discovery by Prometheus per pgMonitor configuration + initialize.Labels(template) + template.Labels[naming.LabelPGMonitorDiscovery] = "true" + + return nil } // reconcileExporterWebConfig reconciles the configmap containing the webconfig for exporter tls diff --git a/internal/controller/postgrescluster/pgmonitor_test.go b/internal/controller/postgrescluster/pgmonitor_test.go index dad7b93fcf..8d8c8281d0 100644 --- a/internal/controller/postgrescluster/pgmonitor_test.go +++ b/internal/controller/postgrescluster/pgmonitor_test.go @@ -1,27 +1,12 @@ -//go:build envtest -// +build envtest - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "bytes" "context" - "fmt" "io" "os" "strings" @@ -35,20 +20,73 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/pgmonitor" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +func testExporterCollectorsAnnotation(t *testing.T, ctx context.Context, cluster *v1beta1.PostgresCluster, queriesConfig, webConfig *corev1.ConfigMap) { + t.Helper() + + t.Run("ExporterCollectorsAnnotation", func(t *testing.T) { + t.Run("UnexpectedValue", func(t *testing.T) { + template := new(corev1.PodTemplateSpec) + cluster := cluster.DeepCopy() + cluster.SetAnnotations(map[string]string{ + naming.PostgresExporterCollectorsAnnotation: "wrong-value", + }) + + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, queriesConfig, webConfig)) + + assert.Equal(t, len(template.Spec.Containers), 1) + container := template.Spec.Containers[0] + + command := strings.Join(container.Command, "\n") + assert.Assert(t, cmp.Contains(command, "postgres_exporter")) + assert.Assert(t, !strings.Contains(command, "collector")) + }) + + t.Run("ExpectedValueNone", func(t *testing.T) { + template := new(corev1.PodTemplateSpec) + cluster := cluster.DeepCopy() + cluster.SetAnnotations(map[string]string{ + naming.PostgresExporterCollectorsAnnotation: "None", + }) + + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, queriesConfig, webConfig)) + + assert.Equal(t, len(template.Spec.Containers), 1) + container := template.Spec.Containers[0] + + command := strings.Join(container.Command, "\n") + assert.Assert(t, cmp.Contains(command, "postgres_exporter")) + assert.Assert(t, cmp.Contains(command, "--[no-]collector")) + + t.Run("LowercaseToo", func(t *testing.T) { + template := new(corev1.PodTemplateSpec) + cluster.SetAnnotations(map[string]string{ + naming.PostgresExporterCollectorsAnnotation: "none", + }) + + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, queriesConfig, webConfig)) + assert.Assert(t, cmp.Contains(strings.Join(template.Spec.Containers[0].Command, "\n"), "--[no-]collector")) + }) + }) + }) +} + func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { + t.Parallel() + + ctx := context.Background() image := "test/image:tag" cluster := &v1beta1.PostgresCluster{} + cluster.Name = "pg1" cluster.Spec.Port = initialize.Int32(5432) - cluster.Spec.Image = image cluster.Spec.ImagePullPolicy = corev1.PullAlways resources := corev1.ResourceRequirements{ @@ -57,18 +95,12 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { }, } - getContainerWithName := func(containers []corev1.Container, name string) corev1.Container { - for _, container := range containers { - if container.Name == name { - return container - } - } - return corev1.Container{} - } + exporterQueriesConfig := new(corev1.ConfigMap) + exporterQueriesConfig.Name = "query-conf" t.Run("ExporterDisabled", func(t *testing.T) { template := &corev1.PodTemplateSpec{} - assert.NilError(t, 
addPGMonitorExporterToInstancePodSpec(cluster, template, nil, nil)) + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, nil, nil)) assert.DeepEqual(t, template, &corev1.PodTemplateSpec{}) }) @@ -88,83 +120,69 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { }}, }, } - exporterQueriesConfig := &corev1.ConfigMap{ - ObjectMeta: naming.ExporterQueriesConfigMap(cluster), - } - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, exporterQueriesConfig, nil)) - container := getContainerWithName(template.Spec.Containers, naming.ContainerPGMonitorExporter) - assert.Equal(t, container.Image, image) - assert.Equal(t, container.ImagePullPolicy, corev1.PullAlways) - assert.DeepEqual(t, container.Resources, resources) - assert.DeepEqual(t, container.Command[:3], []string{"bash", "-ceu", "--"}) - assert.Assert(t, len(container.Command) > 3, "Command does not have enough arguments.") - - commandStringsFound := make(map[string]bool) - for _, elem := range container.Command { - commandStringsFound[elem] = true - } - assert.Assert(t, commandStringsFound[pgmonitor.ExporterExtendQueryPathFlag], - "Command string does not contain the --extend.query-path flag.") - assert.Assert(t, commandStringsFound[pgmonitor.ExporterWebListenAddressFlag], - "Command string does not contain the --web.listen-address flag.") - assert.Assert(t, !commandStringsFound[pgmonitor.ExporterWebConfigFileFlag], - "Command string contains the --web.config.file flag when it shouldn't.") - - assert.DeepEqual(t, container.SecurityContext.Capabilities, &corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, - }) - assert.Equal(t, *container.SecurityContext.Privileged, false) - assert.Equal(t, *container.SecurityContext.ReadOnlyRootFilesystem, true) - assert.Equal(t, *container.SecurityContext.AllowPrivilegeEscalation, false) - assert.Equal(t, *container.Resources.Requests.Cpu(), resource.MustParse("100m")) - - expectedENV := []corev1.EnvVar{ - {Name: "DATA_SOURCE_URI", Value: fmt.Sprintf("localhost:%d/postgres", *cluster.Spec.Port)}, - {Name: "DATA_SOURCE_USER", Value: pgmonitor.MonitoringUser}, - {Name: "DATA_SOURCE_PASS_FILE", Value: "/opt/crunchy/password"}} - assert.DeepEqual(t, container.Env, expectedENV) - - assert.Assert(t, container.Ports[0].ContainerPort == int32(9187), "Exporter container port number not set to '9187'.") - assert.Assert(t, container.Ports[0].Name == "exporter", "Exporter container port name not set to 'exporter'.") - assert.Assert(t, container.Ports[0].Protocol == "TCP", "Exporter container port protocol not set to 'TCP'.") - - assert.Assert(t, template.Spec.Volumes != nil, "No volumes were found.") - - var foundExporterConfigVolume bool - for _, v := range template.Spec.Volumes { - if v.Name == "exporter-config" { - assert.DeepEqual(t, v, corev1.Volume{ - Name: "exporter-config", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - Sources: []corev1.VolumeProjection{{ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: exporterQueriesConfig.Name, - }, - }}, - }, - }, - }, - }) - foundExporterConfigVolume = true - break - } - } - assert.Assert(t, foundExporterConfigVolume, "The exporter-config volume was not found.") - var foundExporterConfigVolumeMount bool - for _, vm := range container.VolumeMounts { - if vm.Name == "exporter-config" && vm.MountPath == "/conf" { - foundExporterConfigVolumeMount = true - break - } - } - assert.Assert(t, foundExporterConfigVolumeMount, 
"The 'exporter-config' volume mount was not found.") + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, nil)) + + assert.Equal(t, len(template.Spec.Containers), 2) + container := template.Spec.Containers[1] + + command := strings.Join(container.Command, "\n") + assert.Assert(t, cmp.Contains(command, "postgres_exporter")) + assert.Assert(t, cmp.Contains(command, "--extend.query-path")) + assert.Assert(t, cmp.Contains(command, "--web.listen-address")) + + // Exclude command from the following comparison. + container.Command = nil + assert.Assert(t, cmp.MarshalMatches(container, ` +env: +- name: DATA_SOURCE_URI + value: localhost:5432/postgres +- name: DATA_SOURCE_USER + value: ccp_monitoring +- name: DATA_SOURCE_PASS_FILE + value: /opt/crunchy/password +image: test/image:tag +imagePullPolicy: Always +name: exporter +ports: +- containerPort: 9187 + name: exporter + protocol: TCP +resources: + requests: + cpu: 100m +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault +volumeMounts: +- mountPath: /conf + name: exporter-config +- mountPath: /opt/crunchy/ + name: monitoring-secret + `)) + + assert.Assert(t, cmp.MarshalMatches(template.Spec.Volumes, ` +- name: exporter-config + projected: + sources: + - configMap: + name: query-conf +- name: monitoring-secret + secret: + secretName: pg1-monitoring + `)) + + testExporterCollectorsAnnotation(t, ctx, cluster, exporterQueriesConfig, nil) }) t.Run("CustomConfigAppendCustomQueriesOff", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.AppendCustomQueries+"=false"))) - cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ PGMonitor: &v1beta1.PGMonitorSpec{ Exporter: &v1beta1.ExporterSpec{ @@ -186,42 +204,34 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { }}, }, } - exporterQueriesConfig := &corev1.ConfigMap{ - ObjectMeta: naming.ExporterQueriesConfigMap(cluster), - } - - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, exporterQueriesConfig, nil)) - var foundConfigVolume bool - for _, v := range template.Spec.Volumes { - if v.Name == "exporter-config" { - assert.DeepEqual(t, v, corev1.Volume{ - Name: "exporter-config", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - Sources: cluster.Spec.Monitoring.PGMonitor.Exporter.Configuration, - }, - }, - }) - foundConfigVolume = true - break - } - } - assert.Assert(t, foundConfigVolume, "The 'exporter-config' volume was not found.") - - container := getContainerWithName(template.Spec.Containers, naming.ContainerPGMonitorExporter) - var foundConfigMount bool - for _, vm := range container.VolumeMounts { - if vm.Name == "exporter-config" && vm.MountPath == "/conf" { - foundConfigMount = true - break - } - } - assert.Assert(t, foundConfigMount, "The 'exporter-config' volume mount was not found.") + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, nil)) + + assert.Equal(t, len(template.Spec.Containers), 2) + container := template.Spec.Containers[1] + + assert.Assert(t, len(template.Spec.Volumes) > 0) + assert.Assert(t, cmp.MarshalMatches(template.Spec.Volumes[0], ` +name: exporter-config +projected: + sources: + - configMap: + name: exporter-custom-config-test + `)) + + assert.Assert(t, len(container.VolumeMounts) > 0) + assert.Assert(t, 
cmp.MarshalMatches(container.VolumeMounts[0], ` +mountPath: /conf +name: exporter-config + `)) }) t.Run("CustomConfigAppendCustomQueriesOn", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.AppendCustomQueries+"=true"))) + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.AppendCustomQueries: true, + })) + ctx := feature.NewContext(ctx, gate) cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ PGMonitor: &v1beta1.PGMonitorSpec{ @@ -244,47 +254,83 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { }}, }, } - exporterQueriesConfig := &corev1.ConfigMap{ - ObjectMeta: naming.ExporterQueriesConfigMap(cluster), - } - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, exporterQueriesConfig, nil)) - - var foundConfigVolume bool - for _, v := range template.Spec.Volumes { - if v.Name == "exporter-config" { - assert.DeepEqual(t, v, corev1.Volume{ - Name: "exporter-config", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - Sources: []corev1.VolumeProjection{{ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "exporter-custom-config-test", - }, - }}, {ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: exporterQueriesConfig.Name, - }, - }}, - }, + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, nil)) + + assert.Equal(t, len(template.Spec.Containers), 2) + container := template.Spec.Containers[1] + + assert.Assert(t, len(template.Spec.Volumes) > 0) + assert.Assert(t, cmp.MarshalMatches(template.Spec.Volumes[0], ` +name: exporter-config +projected: + sources: + - configMap: + name: exporter-custom-config-test + - configMap: + name: query-conf + `)) + + assert.Assert(t, len(container.VolumeMounts) > 0) + assert.Assert(t, cmp.MarshalMatches(container.VolumeMounts[0], ` +mountPath: /conf +name: exporter-config + `)) + }) + + t.Run("CustomTLS", func(t *testing.T) { + cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ + PGMonitor: &v1beta1.PGMonitorSpec{ + Exporter: &v1beta1.ExporterSpec{ + CustomTLSSecret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "custom-exporter-certs", }, }, - }) - foundConfigVolume = true - break - } + }, + }, } - assert.Assert(t, foundConfigVolume, "The 'exporter-config' volume was not found.") - - container := getContainerWithName(template.Spec.Containers, naming.ContainerPGMonitorExporter) - var foundConfigMount bool - for _, vm := range container.VolumeMounts { - if vm.Name == "exporter-config" && vm.MountPath == "/conf" { - foundConfigMount = true - break - } + template := &corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: naming.ContainerDatabase, + }}, + }, } - assert.Assert(t, foundConfigMount, "The 'exporter-config' volume mount was not found.") + + testConfigMap := new(corev1.ConfigMap) + testConfigMap.Name = "test-web-conf" + + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, testConfigMap)) + + assert.Equal(t, len(template.Spec.Containers), 2) + container := template.Spec.Containers[1] + + assert.Assert(t, len(template.Spec.Volumes) > 2, "Expected the original two volumes") + assert.Assert(t, cmp.MarshalMatches(template.Spec.Volumes[2:], ` +- name: exporter-certs + projected: + sources: + - secret: + name: custom-exporter-certs +- configMap: + 
name: test-web-conf + name: web-config + `)) + + assert.Assert(t, len(container.VolumeMounts) > 2, "Expected the original two mounts") + assert.Assert(t, cmp.MarshalMatches(container.VolumeMounts[2:], ` +- mountPath: /certs + name: exporter-certs +- mountPath: /web-config + name: web-config + `)) + + command := strings.Join(container.Command, "\n") + assert.Assert(t, cmp.Contains(command, "postgres_exporter")) + assert.Assert(t, cmp.Contains(command, "--web.config.file")) + + testExporterCollectorsAnnotation(t, ctx, cluster, exporterQueriesConfig, testConfigMap) }) } @@ -292,6 +338,10 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { // reacts when the kubernetes resources are in different states (e.g., checks // what happens when the database pod is terminating) func TestReconcilePGMonitorExporterSetupErrors(t *testing.T) { + if os.Getenv("QUERIES_CONFIG_DIR") == "" { + t.Skip("QUERIES_CONFIG_DIR must be set") + } + for _, test := range []struct { name string podExecCalled bool @@ -446,8 +496,8 @@ func TestReconcilePGMonitorExporterSetupErrors(t *testing.T) { ctx := context.Background() var called bool reconciler := &Reconciler{ - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, @@ -470,8 +520,8 @@ func TestReconcilePGMonitorExporter(t *testing.T) { ctx := context.Background() var called bool reconciler := &Reconciler{ - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, @@ -516,6 +566,10 @@ func TestReconcilePGMonitorExporter(t *testing.T) { // when it should be. Because the status updated when we update the setup sql from // pgmonitor (by using podExec), we check if podExec is called when a change is needed. func TestReconcilePGMonitorExporterStatus(t *testing.T) { + if os.Getenv("QUERIES_CONFIG_DIR") == "" { + t.Skip("QUERIES_CONFIG_DIR must be set") + } + for _, test := range []struct { name string exporterEnabled bool @@ -547,8 +601,8 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { exporterEnabled: true, podExecCalled: false, // Status was generated manually for this test case - // TODO jmckulk: add code to generate status - status: v1beta1.MonitoringStatus{ExporterConfiguration: "66c45b8cfd"}, + // TODO (jmckulk): add code to generate status + status: v1beta1.MonitoringStatus{ExporterConfiguration: "6d874c58df"}, statusChangedAfterReconcile: false, }} { t.Run(test.name, func(t *testing.T) { @@ -560,8 +614,8 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { // Create reconciler with mock PodExec function reconciler := &Reconciler{ - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, @@ -638,7 +692,7 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { // is correct. If exporter is enabled, the return shouldn't be nil. If the exporter is disabled, the // return should be nil. 
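The CustomTLS subtest above pins down the exporter's command-line contract: the base flags are always present, and --web.config.file appears only when a TLS web config has been mounted. Below is a minimal stand-alone sketch of that conditional; the paths and flag values are illustrative only, since the operator assembles the real command inside internal/pgmonitor.

```go
package main

import (
	"fmt"
	"strings"
)

// exporterFlags mirrors the behavior the tests assert: the base flags are
// always emitted, and --web.config.file is appended only when a TLS web
// config file is present. (Sketch only; not the operator's actual API.)
func exporterFlags(webConfigPath string) []string {
	flags := []string{
		"--extend.query-path=/conf/queries.yml", // illustrative path
		"--web.listen-address=:9187",
	}
	if webConfigPath != "" {
		flags = append(flags, "--web.config.file="+webConfigPath)
	}
	return flags
}

func main() {
	fmt.Println(strings.Join(exporterFlags(""), " "))
	fmt.Println(strings.Join(exporterFlags("/web-config/web-config.yml"), " "))
}
```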
func TestReconcileMonitoringSecret(t *testing.T) { - // TODO jmckulk: debug test with existing cluster + // TODO (jmckulk): debug test with existing cluster // Seems to be an issue when running with other tests if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { t.Skip("Test failing with existing cluster") @@ -714,131 +768,6 @@ func TestReconcileMonitoringSecret(t *testing.T) { }) } -// TestConfigureExporterTLS checks that tls settings are configured on a podTemplate. -// When exporter is enabled with custom tls configureExporterTLS should add volumes, -// volumeMounts, and a flag to the Command. Ensure that existing template configurations -// are still present. -func TestConfigureExporterTLS(t *testing.T) { - // Define an existing template with values that could be overwritten - baseTemplate := &corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: naming.ContainerPGMonitorExporter, - Command: pgmonitor.ExporterStartCommand([]string{ - pgmonitor.ExporterExtendQueryPathFlag, pgmonitor.ExporterWebListenAddressFlag, - }), - VolumeMounts: []corev1.VolumeMount{{ - Name: "existing-volume", - MountPath: "some-path", - }}, - }}, - Volumes: []corev1.Volume{{ - Name: "existing-volume", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }}, - }, - } - - t.Run("Exporter disabled", func(t *testing.T) { - cluster := &v1beta1.PostgresCluster{} - template := baseTemplate.DeepCopy() - configureExporterTLS(cluster, template, nil) - // Template shouldn't have changed - assert.DeepEqual(t, template, baseTemplate) - }) - - t.Run("Exporter enabled no tls", func(t *testing.T) { - cluster := &v1beta1.PostgresCluster{ - Spec: v1beta1.PostgresClusterSpec{ - Monitoring: &v1beta1.MonitoringSpec{ - PGMonitor: &v1beta1.PGMonitorSpec{ - Exporter: &v1beta1.ExporterSpec{}, - }, - }, - }, - } - template := baseTemplate.DeepCopy() - configureExporterTLS(cluster, template, nil) - // Template shouldn't have changed - assert.DeepEqual(t, template, baseTemplate) - }) - - t.Run("Custom TLS provided", func(t *testing.T) { - cluster := &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: v1beta1.PostgresClusterSpec{ - Monitoring: &v1beta1.MonitoringSpec{ - PGMonitor: &v1beta1.PGMonitorSpec{ - Exporter: &v1beta1.ExporterSpec{ - CustomTLSSecret: &corev1.SecretProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "custom-exporter-certs", - }, - }, - }, - }, - }, - }, - } - template := baseTemplate.DeepCopy() - - testConfigMap := &corev1.ConfigMap{ - ObjectMeta: naming.ExporterWebConfigMap(cluster), - } - - // What happens if the template already includes volumes/Mounts and envs? - configureExporterTLS(cluster, template, testConfigMap) - - // Did we configure the cert volume and the web config volume while leaving - // existing volumes in place? - assert.Assert(t, marshalMatches(template.Spec.Volumes, ` -- emptyDir: {} - name: existing-volume -- name: exporter-certs - projected: - sources: - - secret: - name: custom-exporter-certs -- configMap: - name: test-exporter-web-config - name: web-config - `), "Volumes are not what they should be.") - - // Is the exporter container in position 0? - assert.Assert(t, template.Spec.Containers[0].Name == naming.ContainerPGMonitorExporter, - "Exporter container is not in the zeroth position.") - - // Did we configure the volume mounts on the container while leaving existing - // mounts in place? 
- assert.Assert(t, marshalMatches(template.Spec.Containers[0].VolumeMounts, ` -- mountPath: some-path - name: existing-volume -- mountPath: /certs - name: exporter-certs -- mountPath: /web-config - name: web-config - `), "Volume mounts are not what they should be.") - - // Did we add the "--web.config.file" flag to the command while leaving the - // rest intact? - assert.DeepEqual(t, template.Spec.Containers[0].Command[:3], []string{"bash", "-ceu", "--"}) - assert.Assert(t, len(template.Spec.Containers[0].Command) > 3, "Command does not have enough arguments.") - - commandStringsFound := make(map[string]bool) - for _, elem := range template.Spec.Containers[0].Command { - commandStringsFound[elem] = true - } - assert.Assert(t, commandStringsFound[pgmonitor.ExporterExtendQueryPathFlag], - "Command string does not contain the --extend.query-path flag.") - assert.Assert(t, commandStringsFound[pgmonitor.ExporterWebListenAddressFlag], - "Command string does not contain the --web.listen-address flag.") - assert.Assert(t, commandStringsFound[pgmonitor.ExporterWebConfigFileFlag], - "Command string does not contain the --web.config.file flag.") - }) -} - // TestReconcileExporterQueriesConfig checks that the ConfigMap intent returned by // reconcileExporterQueriesConfig is correct. If exporter is enabled, the return // shouldn't be nil. If the exporter is disabled, the return should be nil. diff --git a/internal/controller/postgrescluster/pki.go b/internal/controller/postgrescluster/pki.go index c18a3a3e1f..0314ad4406 100644 --- a/internal/controller/postgrescluster/pki.go +++ b/internal/controller/postgrescluster/pki.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster diff --git a/internal/controller/postgrescluster/pki_test.go b/internal/controller/postgrescluster/pki_test.go index cb2ad030f2..c2fe7af82a 100644 --- a/internal/controller/postgrescluster/pki_test.go +++ b/internal/controller/postgrescluster/pki_test.go @@ -1,20 +1,6 @@ -//go:build envtest -// +build envtest - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster diff --git a/internal/controller/postgrescluster/pod_disruption_budget.go b/internal/controller/postgrescluster/pod_disruption_budget.go index 5bf2331671..4bff4a9743 100644 --- a/internal/controller/postgrescluster/pod_disruption_budget.go +++ b/internal/controller/postgrescluster/pod_disruption_budget.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -75,5 +64,5 @@ func getMinAvailable( } // If more than one replica is not defined, we will default to '0' - return initialize.IntOrStringInt32(expect) + return initialize.Pointer(intstr.FromInt32(expect)) } diff --git a/internal/controller/postgrescluster/pod_disruption_budget_test.go b/internal/controller/postgrescluster/pod_disruption_budget_test.go index 08136c25e3..55e2bb63c6 100644 --- a/internal/controller/postgrescluster/pod_disruption_budget_test.go +++ b/internal/controller/postgrescluster/pod_disruption_budget_test.go @@ -1,20 +1,6 @@ -//go:build envtest -// +build envtest - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -64,7 +50,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { "anno-key": "anno-value", }, } - minAvailable = initialize.IntOrStringInt32(1) + minAvailable = initialize.Pointer(intstr.FromInt32(1)) selector := metav1.LabelSelector{ MatchLabels: map[string]string{ "key": "value", @@ -92,19 +78,19 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { func TestGetMinAvailable(t *testing.T) { t.Run("minAvailable provided", func(t *testing.T) { // minAvailable is defined so use that value - ma := initialize.IntOrStringInt32(0) + ma := initialize.Pointer(intstr.FromInt32(0)) expect := getMinAvailable(ma, 1) assert.Equal(t, *expect, intstr.FromInt(0)) - ma = initialize.IntOrStringInt32(1) + ma = initialize.Pointer(intstr.FromInt32(1)) expect = getMinAvailable(ma, 2) assert.Equal(t, *expect, intstr.FromInt(1)) - ma = initialize.IntOrStringString("50%") + ma = initialize.Pointer(intstr.FromString("50%")) expect = getMinAvailable(ma, 3) assert.Equal(t, *expect, intstr.FromString("50%")) - ma = initialize.IntOrStringString("200%") + ma = initialize.Pointer(intstr.FromString("200%")) expect = getMinAvailable(ma, 2147483647) assert.Equal(t, *expect, intstr.FromString("200%")) }) diff --git a/internal/controller/postgrescluster/postgres.go b/internal/controller/postgrescluster/postgres.go index 7e3de0d881..312079d824 100644 --- a/internal/controller/postgrescluster/postgres.go +++ b/internal/controller/postgrescluster/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -23,16 +12,19 @@ import ( "net" "net/url" "regexp" + "sort" "strings" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" @@ -53,7 +45,7 @@ func (r *Reconciler) generatePostgresUserSecret( username := string(spec.Name) intent := &corev1.Secret{ObjectMeta: naming.PostgresUserSecret(cluster, username)} intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) - initialize.ByteMap(&intent.Data) + initialize.Map(&intent.Data) // Populate the Secret with libpq keywords for connecting through // the primary Service. 
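The pod_disruption_budget and secret changes above replace type-specific helpers such as initialize.IntOrStringInt32, initialize.IntOrStringString, and initialize.ByteMap with generic ones. A sketch of the generic shape those call sites imply follows; Pointer here is a stand-in for the helper in internal/initialize, not a copy of it.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// Pointer returns a pointer to any value, which is the generic shape the new
// call sites rely on: one constructor replaces the old per-type helpers.
func Pointer[T any](v T) *T { return &v }

func main() {
	// Before: initialize.IntOrStringInt32(1)
	// After:  initialize.Pointer(intstr.FromInt32(1))
	minAvailable := Pointer(intstr.FromInt32(1))
	percent := Pointer(intstr.FromString("50%"))
	fmt.Println(minAvailable.String(), percent.String())
}
```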
@@ -197,14 +189,14 @@ func (r *Reconciler) reconcilePostgresDatabases( ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) podExecutor = func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) } // Gather the list of database that should exist in PostgreSQL. - databases := sets.String{} + databases := sets.Set[string]{} if cluster.Spec.Users == nil { // Users are unspecified; create one database matching the cluster name // if it is also a valid database name. @@ -229,8 +221,6 @@ func (r *Reconciler) reconcilePostgresDatabases( } } - // Calculate a hash of the SQL that should be executed in PostgreSQL. - var pgAuditOK, postgisInstallOK bool create := func(ctx context.Context, exec postgres.Executor) error { if pgAuditOK = pgaudit.EnableInPostgreSQL(ctx, exec) == nil; !pgAuditOK { @@ -254,9 +244,10 @@ func (r *Reconciler) reconcilePostgresDatabases( "Unable to install PostGIS") } - return postgres.CreateDatabasesInPostgreSQL(ctx, exec, databases.List()) + return postgres.CreateDatabasesInPostgreSQL(ctx, exec, sets.List(databases)) } + // Calculate a hash of the SQL that should be executed in PostgreSQL. revision, err := safeHash32(func(hasher io.Writer) error { // Discard log messages about executing SQL. return create(logging.NewContext(ctx, logging.Discard()), func( @@ -297,6 +288,8 @@ func (r *Reconciler) reconcilePostgresDatabases( func (r *Reconciler) reconcilePostgresUsers( ctx context.Context, cluster *v1beta1.PostgresCluster, instances *observedInstances, ) error { + r.validatePostgresUsers(cluster) + users, secrets, err := r.reconcilePostgresUserSecrets(ctx, cluster) if err == nil { err = r.reconcilePostgresUsersInPostgreSQL(ctx, cluster, instances, users, secrets) @@ -311,6 +304,40 @@ func (r *Reconciler) reconcilePostgresUsers( return err } +// validatePostgresUsers emits warnings when cluster.Spec.Users contains values +// that are no longer valid. 
NOTE(ratcheting) NOTE(validation) +// - https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-ratcheting +func (r *Reconciler) validatePostgresUsers(cluster *v1beta1.PostgresCluster) { + if len(cluster.Spec.Users) == 0 { + return + } + + path := field.NewPath("spec", "users") + reComments := regexp.MustCompile(`(?:--|/[*]|[*]/)`) + rePassword := regexp.MustCompile(`(?i:PASSWORD)`) + + for i := range cluster.Spec.Users { + errs := field.ErrorList{} + spec := cluster.Spec.Users[i] + + if reComments.MatchString(spec.Options) { + errs = append(errs, + field.Invalid(path.Index(i).Child("options"), spec.Options, + "cannot contain comments")) + } + if rePassword.MatchString(spec.Options) { + errs = append(errs, + field.Invalid(path.Index(i).Child("options"), spec.Options, + "cannot assign password")) + } + + if len(errs) > 0 { + r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidUser", + errs.ToAggregate().Error()) + } + } +} + // +kubebuilder:rbac:groups="",resources="secrets",verbs={list} // +kubebuilder:rbac:groups="",resources="secrets",verbs={create,delete,patch} @@ -375,6 +402,36 @@ func (r *Reconciler) reconcilePostgresUserSecrets( )) } + // Sorts the slice of secrets.Items, which may contain secrets with identical labels. + // If one secret has "pguser" in its name and the other does not, the + // one without "pguser" is moved to the front. + // If both secrets have "pguser" in their names or neither has "pguser", they + // are sorted by creation timestamp. + // If two secrets have the same creation timestamp, they are further sorted by name. + // The secret to be used by PGO is put at the end of the sorted slice. + sort.Slice(secrets.Items, func(i, j int) bool { + // Check whether either secret has "pguser" in its name + isIPgUser := strings.Contains(secrets.Items[i].Name, "pguser") + isJPgUser := strings.Contains(secrets.Items[j].Name, "pguser") + + // If one secret has "pguser" and the other does not, + // move the one without "pguser" to the front + if isIPgUser && !isJPgUser { + return false + } else if !isIPgUser && isJPgUser { + return true + } + + if secrets.Items[i].CreationTimestamp.Time.Equal(secrets.Items[j].CreationTimestamp.Time) { + // If the creation timestamps are equal, sort by name + return secrets.Items[i].Name < secrets.Items[j].Name + } + + // If both secrets have "pguser" or neither has "pguser", + // sort by creation timestamp + return secrets.Items[i].CreationTimestamp.Time.After(secrets.Items[j].CreationTimestamp.Time) + }) + // Index secrets by PostgreSQL user name and delete any that are not in the // cluster spec. Keep track of the deprecated default secret to migrate its // contents when the current secret doesn't exist. @@ -448,9 +505,9 @@ func (r *Reconciler) reconcilePostgresUsersInPostgreSQL( ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) podExecutor = func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...)
} break } @@ -467,7 +524,7 @@ func (r *Reconciler) reconcilePostgresUsersInPostgreSQL( } write := func(ctx context.Context, exec postgres.Executor) error { - return postgres.WriteUsersInPostgreSQL(ctx, exec, specUsers, verifiers) + return postgres.WriteUsersInPostgreSQL(ctx, cluster, exec, specUsers, verifiers) } revision, err := safeHash32(func(hasher io.Writer) error { @@ -512,7 +569,7 @@ func (r *Reconciler) reconcilePostgresUsersInPostgreSQL( func (r *Reconciler) reconcilePostgresDataVolume( ctx context.Context, cluster *v1beta1.PostgresCluster, instanceSpec *v1beta1.PostgresInstanceSetSpec, instance *appsv1.StatefulSet, - clusterVolumes []corev1.PersistentVolumeClaim, + clusterVolumes []corev1.PersistentVolumeClaim, sourceCluster *v1beta1.PostgresCluster, ) (*corev1.PersistentVolumeClaim, error) { labelMap := map[string]string{ @@ -553,6 +610,38 @@ func (r *Reconciler) reconcilePostgresDataVolume( pvc.Spec = instanceSpec.DataVolumeClaimSpec + // If a source cluster was provided and VolumeSnapshots are turned on in the source cluster and + // there is a VolumeSnapshot available for the source cluster that is ReadyToUse, use it as the + // source for the PVC. If there is an error when retrieving VolumeSnapshots, or no ReadyToUse + // snapshots were found, create a warning event, but continue creating the PVC in the usual fashion. + if sourceCluster != nil && sourceCluster.Spec.Backups.Snapshots != nil && feature.Enabled(ctx, feature.VolumeSnapshots) { + snapshots, err := r.getSnapshotsForCluster(ctx, sourceCluster) + if err == nil { + snapshot := getLatestReadySnapshot(snapshots) + if snapshot != nil { + r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "BootstrappingWithSnapshot", + "Snapshot found for %v; bootstrapping cluster with snapshot.", sourceCluster.Name) + pvc.Spec.DataSource = &corev1.TypedLocalObjectReference{ + APIGroup: initialize.String("snapshot.storage.k8s.io"), + Kind: snapshot.Kind, + Name: snapshot.Name, + } + } else { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "SnapshotNotFound", + "No ReadyToUse snapshots were found for %v; proceeding with typical restore process.", sourceCluster.Name) + } + } else { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "SnapshotNotFound", + "Could not get snapshots for %v, proceeding with typical restore process.", sourceCluster.Name) + } + } + + r.setVolumeSize(ctx, cluster, pvc, instanceSpec.Name) + + // Clear any set limit before applying PVC. This is needed to allow the limit + // value to change later. + pvc.Spec.Resources.Limits = nil + if err == nil { err = r.handlePersistentVolumeClaimError(cluster, errors.WithStack(r.apply(ctx, pvc))) @@ -561,6 +650,75 @@ func (r *Reconciler) reconcilePostgresDataVolume( return pvc, err } +// setVolumeSize compares the potential sizes from the instance spec, status +// and limit and sets the appropriate current value. +func (r *Reconciler) setVolumeSize(ctx context.Context, cluster *v1beta1.PostgresCluster, + pvc *corev1.PersistentVolumeClaim, instanceSpecName string) { + log := logging.FromContext(ctx) + + // Store the limit for this instance set. This value will not change below. + volumeLimitFromSpec := pvc.Spec.Resources.Limits.Storage() + + // Capture the largest pgData volume size currently defined for a given instance set. + // This value will capture our desired update. + volumeRequestSize := pvc.Spec.Resources.Requests.Storage() + + // If the request value is greater than the set limit, use the limit and issue + // a warning event. A limit of 0 is ignored.
+ if !volumeLimitFromSpec.IsZero() && + volumeRequestSize.Value() > volumeLimitFromSpec.Value() { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "VolumeRequestOverLimit", + "pgData volume request (%v) for %s/%s is greater than set limit (%v). Limit value will be used.", + volumeRequestSize, cluster.Name, instanceSpecName, volumeLimitFromSpec) + + pvc.Spec.Resources.Requests = corev1.ResourceList{ + corev1.ResourceStorage: *resource.NewQuantity(volumeLimitFromSpec.Value(), resource.BinarySI), + } + // Otherwise, if the limit is not set or the feature gate is not enabled, do not autogrow. + } else if !volumeLimitFromSpec.IsZero() && feature.Enabled(ctx, feature.AutoGrowVolumes) { + for i := range cluster.Status.InstanceSets { + if instanceSpecName == cluster.Status.InstanceSets[i].Name { + for _, dpv := range cluster.Status.InstanceSets[i].DesiredPGDataVolume { + if dpv != "" { + desiredRequest, err := resource.ParseQuantity(dpv) + if err == nil { + if desiredRequest.Value() > volumeRequestSize.Value() { + volumeRequestSize = &desiredRequest + } + } else { + log.Error(err, "Unable to parse volume request: "+dpv) + } + } + } + } + } + + // If the volume request size is greater than or equal to the limit and the + // limit is not zero, update the request size to the limit value. + // If the user manually sets a limit that is smaller than the current or + // requested volume size, the larger size will be ignored in favor of the limit value. + if volumeRequestSize.Value() >= volumeLimitFromSpec.Value() { + + r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "VolumeLimitReached", + "pgData volume(s) for %s/%s are at size limit (%v).", cluster.Name, + instanceSpecName, volumeLimitFromSpec) + + // If the volume size request is greater than the limit, issue an + // additional event warning. + if volumeRequestSize.Value() > volumeLimitFromSpec.Value() { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "DesiredVolumeAboveLimit", + "The desired size (%v) for the %s/%s pgData volume(s) is greater than the size limit (%v).", + volumeRequestSize, cluster.Name, instanceSpecName, volumeLimitFromSpec) + } + + volumeRequestSize = volumeLimitFromSpec + } + pvc.Spec.Resources.Requests = corev1.ResourceList{ + corev1.ResourceStorage: *resource.NewQuantity(volumeRequestSize.Value(), resource.BinarySI), + } + } +} + // +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,patch} // reconcileTablespaceVolumes writes the PersistentVolumeClaims for instance's @@ -571,7 +729,7 @@ func (r *Reconciler) reconcileTablespaceVolumes( clusterVolumes []corev1.PersistentVolumeClaim, ) (tablespaceVolumes []*corev1.PersistentVolumeClaim, err error) { - if !util.DefaultMutableFeatureGate.Enabled(util.TablespaceVolumes) { + if !feature.Enabled(ctx, feature.TablespaceVolumes) { return } @@ -698,7 +856,7 @@ func (r *Reconciler) reconcilePostgresWALVolume( // This assumes that $PGDATA matches the configured PostgreSQL "data_directory".
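The setVolumeSize logic above reduces to a small decision table over the spec request, the spec limit, and the desired size recorded in status. Here is a simplified, runnable sketch of that decision with events and logging omitted; clampVolumeRequest is an illustrative name, not the operator's API. The expected values line up with the RequestAboveLimit, StatusWithLimit, StatusWithLimitGrowToLimit, and NoFeatureGate cases in TestSetVolumeSize below.

```go
package main

import "fmt"

// clampVolumeRequest condenses setVolumeSize's decision into a pure function
// over byte counts: requests are never allowed above the limit, and with the
// AutoGrowVolumes gate on, the desired size from status can grow the request
// up to that limit. A limit of 0 disables all of this.
func clampVolumeRequest(request, limit, desired int64, autoGrow bool) int64 {
	switch {
	case limit != 0 && request > limit:
		return limit // "VolumeRequestOverLimit" warning
	case limit != 0 && autoGrow:
		if desired > request {
			request = desired
		}
		if request >= limit {
			return limit // "VolumeLimitReached" event
		}
		return request
	default:
		return request // no limit or gate disabled: leave the request alone
	}
}

func main() {
	const gi = 1 << 30
	fmt.Println(clampVolumeRequest(4*gi, 3*gi, 0, false) / gi)    // 3: request over limit
	fmt.Println(clampVolumeRequest(1*gi, 3*gi, 2*gi, true) / gi)  // 2: grown toward limit
	fmt.Println(clampVolumeRequest(1*gi, 2*gi, 2*gi, true) / gi)  // 2: grown to limit
	fmt.Println(clampVolumeRequest(1*gi, 3*gi, 2*gi, false) / gi) // 1: gate off
}
```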
var stdout bytes.Buffer err = errors.WithStack(r.PodExec( - observed.Pods[0].Namespace, observed.Pods[0].Name, naming.ContainerDatabase, + ctx, observed.Pods[0].Namespace, observed.Pods[0].Name, naming.ContainerDatabase, nil, &stdout, nil, "bash", "-ceu", "--", `exec realpath "${PGDATA}/pg_wal"`)) walDirectory = strings.TrimRight(stdout.String(), "\n") @@ -802,9 +960,9 @@ func (r *Reconciler) reconcileDatabaseInitSQL(ctx context.Context, } podExecutor = func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - return r.PodExec(pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) } // A writable pod executor has been found and we have the sql provided by diff --git a/internal/controller/postgrescluster/postgres_test.go b/internal/controller/postgrescluster/postgres_test.go index 1dfe00a187..0780b0f577 100644 --- a/internal/controller/postgrescluster/postgres_test.go +++ b/internal/controller/postgrescluster/postgres_test.go @@ -1,42 +1,36 @@ -//go:build envtest -// +build envtest - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" + "fmt" "io" "testing" + "github.com/go-logr/logr/funcr" "github.com/google/go-cmp/cmp/cmpopts" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/events" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -247,26 +241,116 @@ func TestReconcilePostgresVolumes(t *testing.T) { Owner: client.FieldOwner(t.Name()), } - cluster := testCluster() - cluster.Namespace = setupNamespace(t, tClient).Name + t.Run("DataVolumeNoSourceCluster", func(t *testing.T) { + cluster := testCluster() + ns := setupNamespace(t, tClient) + cluster.Namespace = ns.Name - assert.NilError(t, tClient.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) - spec := &v1beta1.PostgresInstanceSetSpec{} - assert.NilError(t, yaml.Unmarshal([]byte(`{ - name: "some-instance", - dataVolumeClaimSpec: { - accessModes: [ReadWriteOnce], - resources: { requests: { storage: 1Gi } }, - storageClassName: "storage-class-for-data", - }, - }`), spec)) + spec := &v1beta1.PostgresInstanceSetSpec{} + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: "some-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Gi } }, + storageClassName: "storage-class-for-data", + }, + }`), spec)) + instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + + pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil, nil) + assert.NilError(t, err) + + assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) + + assert.Equal(t, pvc.Labels[naming.LabelCluster], cluster.Name) + assert.Equal(t, pvc.Labels[naming.LabelInstance], instance.Name) + assert.Equal(t, pvc.Labels[naming.LabelInstanceSet], spec.Name) + assert.Equal(t, pvc.Labels[naming.LabelRole], "pgdata") + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + requests: + storage: 1Gi +storageClassName: storage-class-for-data +volumeMode: Filesystem + `)) + }) + + t.Run("DataVolumeSourceClusterWithGoodSnapshot", func(t *testing.T) { + cluster := testCluster() + ns := setupNamespace(t, tClient) + cluster.Namespace = ns.Name - instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) - t.Run("DataVolume", func(t *testing.T) { - 
pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil) + spec := &v1beta1.PostgresInstanceSetSpec{} + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: "some-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Gi } }, + storageClassName: "storage-class-for-data", + }, + }`), spec)) + instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler.Recorder = recorder + + // Turn on VolumeSnapshots feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.VolumeSnapshots: true, + })) + ctx := feature.NewContext(ctx, gate) + + // Create source cluster and enable snapshots + sourceCluster := testCluster() + sourceCluster.Namespace = ns.Name + sourceCluster.Name = "rhino" + sourceCluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "some-class-name", + } + + // Create a snapshot + snapshot := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "rhino", + }, + }, + } + snapshot.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") + snapshot.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") + err := reconciler.apply(ctx, snapshot) + assert.NilError(t, err) + + // Get snapshot and update Status.ReadyToUse and CreationTime + err = reconciler.Client.Get(ctx, client.ObjectKeyFromObject(snapshot), snapshot) + assert.NilError(t, err) + + currentTime := metav1.Now() + snapshot.Status = &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(true), + CreationTime: &currentTime, + } + err = reconciler.Client.Status().Update(ctx, snapshot) + assert.NilError(t, err) + + // Reconcile volume + pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil, sourceCluster) assert.NilError(t, err) assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) @@ -276,18 +360,111 @@ func TestReconcilePostgresVolumes(t *testing.T) { assert.Equal(t, pvc.Labels[naming.LabelInstanceSet], spec.Name) assert.Equal(t, pvc.Labels[naming.LabelRole], "pgdata") - assert.Assert(t, marshalMatches(pvc.Spec, ` + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` accessModes: - ReadWriteOnce +dataSource: + apiGroup: snapshot.storage.k8s.io + kind: VolumeSnapshot + name: some-snapshot +dataSourceRef: + apiGroup: snapshot.storage.k8s.io + kind: VolumeSnapshot + name: some-snapshot resources: requests: storage: 1Gi storageClassName: storage-class-for-data volumeMode: Filesystem `)) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "BootstrappingWithSnapshot") + assert.Equal(t, recorder.Events[0].Note, "Snapshot found for rhino; bootstrapping cluster with snapshot.") + }) + + t.Run("DataVolumeSourceClusterSnapshotsEnabledNoSnapshots", func(t *testing.T) { + cluster := testCluster() + ns := setupNamespace(t, tClient) + cluster.Namespace = ns.Name + + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + + spec := &v1beta1.PostgresInstanceSetSpec{} + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: "some-instance", +
dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Gi } }, + storageClassName: "storage-class-for-data", + }, + }`), spec)) + instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler.Recorder = recorder + + // Turn on VolumeSnapshots feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.VolumeSnapshots: true, + })) + ctx := feature.NewContext(ctx, gate) + + // Create source cluster and enable snapshots + sourceCluster := testCluster() + sourceCluster.Namespace = ns.Name + sourceCluster.Name = "rhino" + sourceCluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "some-class-name", + } + + // Reconcile volume + pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil, sourceCluster) + assert.NilError(t, err) + + assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) + + assert.Equal(t, pvc.Labels[naming.LabelCluster], cluster.Name) + assert.Equal(t, pvc.Labels[naming.LabelInstance], instance.Name) + assert.Equal(t, pvc.Labels[naming.LabelInstanceSet], spec.Name) + assert.Equal(t, pvc.Labels[naming.LabelRole], "pgdata") + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + requests: + storage: 1Gi +storageClassName: storage-class-for-data +volumeMode: Filesystem + `)) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "SnapshotNotFound") + assert.Equal(t, recorder.Events[0].Note, "No ReadyToUse snapshots were found for rhino; proceeding with typical restore process.") }) t.Run("WALVolume", func(t *testing.T) { + cluster := testCluster() + ns := setupNamespace(t, tClient) + cluster.Namespace = ns.Name + + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + + spec := &v1beta1.PostgresInstanceSetSpec{} + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: "some-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Gi } }, + storageClassName: "storage-class-for-data", + }, + }`), spec)) + instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + observed := &Instance{} t.Run("None", func(t *testing.T) { @@ -316,7 +493,7 @@ volumeMode: Filesystem assert.Equal(t, pvc.Labels[naming.LabelInstanceSet], spec.Name) assert.Equal(t, pvc.Labels[naming.LabelRole], "pgwal") - assert.Assert(t, marshalMatches(pvc.Spec, ` + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` accessModes: - ReadWriteMany resources: @@ -355,7 +532,7 @@ volumeMode: Filesystem expected := errors.New("flop") reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, _ io.Reader, _, _ io.Writer, command ...string, ) error { assert.Equal(t, namespace, "pod-ns") @@ -372,7 +549,7 @@ volumeMode: Filesystem // Files are in the wrong place; expect no changes to the PVC. 
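The two DataVolumeSourceCluster subtests above exercise getLatestReadySnapshot indirectly: a ReadyToUse snapshot produces a BootstrappingWithSnapshot event, while the absence of one produces a SnapshotNotFound warning. Below is a runnable sketch of that selection rule, using pared-down stand-in types rather than the external-snapshotter API, so the behavior shown is an assumption about the helper, not a copy of it.

```go
package main

import (
	"fmt"
	"time"
)

// snapshot is a pared-down stand-in for volumesnapshotv1.VolumeSnapshot so
// this sketch runs without the external-snapshotter dependency.
type snapshot struct {
	Name       string
	ReadyToUse bool
	Created    time.Time
}

// latestReadySnapshot mirrors the behavior the tests rely on: snapshots that
// are not ReadyToUse are ignored, and the newest of the rest wins. A nil
// result means the caller falls back to the typical restore process.
func latestReadySnapshot(snaps []snapshot) *snapshot {
	var latest *snapshot
	for i := range snaps {
		s := &snaps[i]
		if s.ReadyToUse && (latest == nil || s.Created.After(latest.Created)) {
			latest = s
		}
	}
	return latest
}

func main() {
	now := time.Now()
	snaps := []snapshot{
		{Name: "old", ReadyToUse: true, Created: now.Add(-time.Hour)},
		{Name: "new-not-ready", ReadyToUse: false, Created: now},
		{Name: "new-ready", ReadyToUse: true, Created: now.Add(-time.Minute)},
	}
	fmt.Println(latestReadySnapshot(snaps).Name) // new-ready
}
```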
reconciler.PodExec = func( - _, _, _ string, _ io.Reader, stdout, _ io.Writer, _ ...string, + ctx context.Context, _, _, _ string, _ io.Reader, stdout, _ io.Writer, _ ...string, ) error { assert.Assert(t, stdout != nil) _, err := stdout.Write([]byte("some-place\n")) @@ -395,7 +572,7 @@ volumeMode: Filesystem new(corev1.ContainerStateRunning) reconciler.PodExec = func( - _, _, _ string, _ io.Reader, stdout, _ io.Writer, _ ...string, + ctx context.Context, _, _, _ string, _ io.Reader, stdout, _ io.Writer, _ ...string, ) error { assert.Assert(t, stdout != nil) _, err := stdout.Write([]byte(postgres.WALDirectory(cluster, spec) + "\n")) @@ -425,6 +602,318 @@ volumeMode: Filesystem }) } +func TestSetVolumeSize(t *testing.T) { + t.Parallel() + + ctx := context.Background() + cluster := v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "elephant", + Namespace: "test-namespace", + }, + Spec: v1beta1.PostgresClusterSpec{ + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + Name: "some-instance", + Replicas: initialize.Int32(1), + }}, + }, + } + + instance := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "elephant-some-instance-wxyz-0", + Namespace: cluster.Namespace, + }} + + setupLogCapture := func(ctx context.Context) (context.Context, *[]string) { + calls := []string{} + testlog := funcr.NewJSON(func(object string) { + calls = append(calls, object) + }, funcr.Options{ + Verbosity: 1, + }) + return logging.NewContext(ctx, testlog), &calls + } + + // helper functions + instanceSetSpec := func(request, limit string) *v1beta1.PostgresInstanceSetSpec { + return &v1beta1.PostgresInstanceSetSpec{ + Name: "some-instance", + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse(request), + }, + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse(limit), + }}}} + } + + desiredStatus := func(request string) v1beta1.PostgresClusterStatus { + desiredMap := make(map[string]string) + desiredMap["elephant-some-instance-wxyz-0"] = request + return v1beta1.PostgresClusterStatus{ + InstanceSets: []v1beta1.PostgresInstanceSetStatus{{ + Name: "some-instance", + DesiredPGDataVolume: desiredMap, + }}} + } + + t.Run("RequestAboveLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("4Gi", "3Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 3Gi + requests: + storage: 3Gi +`)) + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "VolumeRequestOverLimit") + assert.Equal(t, recorder.Events[0].Note, "pgData volume request (4Gi) for elephant/some-instance is greater than set limit (3Gi). 
Limit value will be used.") + }) + + t.Run("NoFeatureGate", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "3Gi") + + desiredMap := make(map[string]string) + desiredMap["elephant-some-instance-wxyz-0"] = "2Gi" + cluster.Status = v1beta1.PostgresClusterStatus{ + InstanceSets: []v1beta1.PostgresInstanceSetStatus{{ + Name: "some-instance", + DesiredPGDataVolume: desiredMap, + }}, + } + + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 3Gi + requests: + storage: 1Gi + `)) + + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 0) + + // clear status for other tests + cluster.Status = v1beta1.PostgresClusterStatus{} + }) + + t.Run("FeatureEnabled", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.AutoGrowVolumes: true, + })) + ctx := feature.NewContext(ctx, gate) + + t.Run("StatusNoLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := &v1beta1.PostgresInstanceSetSpec{ + Name: "some-instance", + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }}}} + cluster.Status = desiredStatus("2Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + requests: + storage: 1Gi +`)) + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 0) + + // clear status for other tests + cluster.Status = v1beta1.PostgresClusterStatus{} + }) + + t.Run("LimitNoStatus", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "2Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 2Gi + requests: + storage: 1Gi +`)) + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 0) + }) + + t.Run("BadStatusWithLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "3Gi") + cluster.Status = desiredStatus("NotAValidValue") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce 
+resources: + limits: + storage: 3Gi + requests: + storage: 1Gi +`)) + + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 1) + assert.Assert(t, cmp.Contains((*logs)[0], "Unable to parse volume request: NotAValidValue")) + }) + + t.Run("StatusWithLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "3Gi") + cluster.Status = desiredStatus("2Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 3Gi + requests: + storage: 2Gi +`)) + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 0) + }) + + t.Run("StatusWithLimitGrowToLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "2Gi") + cluster.Status = desiredStatus("2Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 2Gi + requests: + storage: 2Gi +`)) + + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "VolumeLimitReached") + assert.Equal(t, recorder.Events[0].Note, "pgData volume(s) for elephant/some-instance are at size limit (2Gi).") + }) + + t.Run("DesiredStatusOverLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("4Gi", "5Gi") + cluster.Status = desiredStatus("10Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 5Gi + requests: + storage: 5Gi +`)) + + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 2) + var found1, found2 bool + for _, event := range recorder.Events { + if event.Reason == "VolumeLimitReached" { + found1 = true + assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Note, "pgData volume(s) for elephant/some-instance are at size limit (5Gi).") + } + if event.Reason == "DesiredVolumeAboveLimit" { + found2 = true + assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Note, + "The desired size (10Gi) for the elephant/some-instance pgData volume(s) is greater than the size limit (5Gi).") + } + } + assert.Assert(t, found1 && found2) + }) + + }) +} + func TestReconcileDatabaseInitSQL(t *testing.T) { ctx := context.Background() var called bool @@ -438,8 +927,8 @@ func TestReconcileDatabaseInitSQL(t *testing.T) { // Overwrite the PodExec function with a check to ensure the exec // call would have been made - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - 
stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, @@ -562,8 +1051,8 @@ func TestReconcileDatabaseInitSQLConfigMap(t *testing.T) { // Overwrite the PodExec function with a check to ensure the exec // call would have been made - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, @@ -661,3 +1150,84 @@ func TestReconcileDatabaseInitSQLConfigMap(t *testing.T) { assert.Assert(t, called) }) } + +func TestValidatePostgresUsers(t *testing.T) { + t.Parallel() + + t.Run("Empty", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + + cluster.Spec.Users = nil + reconciler.validatePostgresUsers(cluster) + assert.Equal(t, len(recorder.Events), 0) + + cluster.Spec.Users = []v1beta1.PostgresUserSpec{} + reconciler.validatePostgresUsers(cluster) + assert.Equal(t, len(recorder.Events), 0) + }) + + // See [internal/testing/validation.TestPostgresUserOptions] + + t.Run("NoComments", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + cluster.Name = "pg1" + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "dashes", Options: "ANY -- comment"}, + {Name: "block-open", Options: "/* asdf"}, + {Name: "block-close", Options: " qw */ rt"}, + } + + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + + reconciler.validatePostgresUsers(cluster) + assert.Equal(t, len(recorder.Events), 3) + + for i, event := range recorder.Events { + assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Reason, "InvalidUser") + assert.Assert(t, cmp.Contains(event.Note, "cannot contain comments")) + assert.Assert(t, cmp.Contains(event.Note, + fmt.Sprintf("spec.users[%d].options", i))) + } + }) + + t.Run("NoPassword", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + cluster.Name = "pg5" + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "uppercase", Options: "SUPERUSER PASSWORD ''"}, + {Name: "lowercase", Options: "password 'asdf'"}, + } + + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + + reconciler.validatePostgresUsers(cluster) + assert.Equal(t, len(recorder.Events), 2) + + for i, event := range recorder.Events { + assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Reason, "InvalidUser") + assert.Assert(t, cmp.Contains(event.Note, "cannot assign password")) + assert.Assert(t, cmp.Contains(event.Note, + fmt.Sprintf("spec.users[%d].options", i))) + } + }) + + t.Run("Valid", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "normal", Options: "CREATEDB valid until '2006-01-02'"}, + {Name: "very-full", Options: "NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 5"}, + } + + reconciler := &Reconciler{} + assert.Assert(t, reconciler.Recorder == nil, + "expected the following to not use a Recorder at all") + + reconciler.validatePostgresUsers(cluster) + }) +} diff --git a/internal/controller/postgrescluster/rbac.go 
b/internal/controller/postgrescluster/rbac.go index 43122b56ac..38dd808c44 100644 --- a/internal/controller/postgrescluster/rbac.go +++ b/internal/controller/postgrescluster/rbac.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster diff --git a/internal/controller/postgrescluster/snapshots.go b/internal/controller/postgrescluster/snapshots.go new file mode 100644 index 0000000000..76ad195600 --- /dev/null +++ b/internal/controller/postgrescluster/snapshots.go @@ -0,0 +1,617 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package postgrescluster + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" + + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/pgbackrest" + "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +//+kubebuilder:rbac:groups="snapshot.storage.k8s.io",resources="volumesnapshots",verbs={get,list,create,patch,delete} + +// The controller-runtime client sets up a cache that watches anything we "get" or "list". +//+kubebuilder:rbac:groups="snapshot.storage.k8s.io",resources="volumesnapshots",verbs={watch} + +// reconcileVolumeSnapshots creates and manages VolumeSnapshots if the proper VolumeSnapshot CRDs +// are installed and VolumeSnapshots are enabled for the PostgresCluster. A VolumeSnapshot of the +// primary instance's pgdata volume will be created whenever a backup is completed. The steps to +// create snapshots include the following sequence: +// 1. We find the latest completed backup job and check the timestamp. +// 2. If the timestamp is later than what's on the dedicated snapshot PVC, a restore job runs in +// the dedicated snapshot volume. +// 3. When the restore job completes, an annotation is updated on the PVC. If the restore job +// fails, we don't run it again. +// 4. When the PVC annotation is updated, we see if there's a volume snapshot with an earlier +// timestamp. +// 5. If there are no snapshots at all, we take a snapshot and put the backup job's completion +// timestamp on the snapshot annotation. +// 6. If an earlier snapshot is found, we take a new snapshot, annotate it and delete the old +// snapshot. +// 7. When the snapshot job completes, we delete the restore job. 
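+//
+// For illustration, snapshots are requested with a spec section like the
+// following (hypothetical manifest snippet; the class name is an example):
+//
+//	spec:
+//	  backups:
+//	    snapshots:
+//	      volumeSnapshotClassName: my-snapshotclass
+//
+// The VolumeSnapshots feature gate must also be enabled for any of this to run.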
+func (r *Reconciler) reconcileVolumeSnapshots(ctx context.Context,
+	postgrescluster *v1beta1.PostgresCluster, pvc *corev1.PersistentVolumeClaim) error {
+
+	// If the VolumeSnapshots feature gate is disabled, do nothing and return early.
+	if !feature.Enabled(ctx, feature.VolumeSnapshots) {
+		return nil
+	}
+
+	// Check if the Kube cluster has VolumeSnapshots installed. If VolumeSnapshots
+	// are not installed, we need to return early. If the user is attempting to use
+	// VolumeSnapshots, return an error; otherwise, return nil.
+	volumeSnapshotKindExists, err := r.GroupVersionKindExists("snapshot.storage.k8s.io/v1", "VolumeSnapshot")
+	if err != nil {
+		return err
+	}
+	if !*volumeSnapshotKindExists {
+		if postgrescluster.Spec.Backups.Snapshots != nil {
+			return errors.New("VolumeSnapshots are not installed/enabled in this Kubernetes cluster; cannot create snapshot.")
+		} else {
+			return nil
+		}
+	}
+
+	// If the user is attempting to use snapshots and has tablespaces enabled, we
+	// need to create a warning event indicating that the two features are not
+	// currently compatible and return early.
+	if postgrescluster.Spec.Backups.Snapshots != nil &&
+		clusterUsingTablespaces(ctx, postgrescluster) {
+		r.Recorder.Event(postgrescluster, corev1.EventTypeWarning, "IncompatibleFeatures",
+			"VolumeSnapshots not currently compatible with TablespaceVolumes; cannot create snapshot.")
+		return nil
+	}
+
+	// Get all snapshots for the cluster.
+	snapshots, err := r.getSnapshotsForCluster(ctx, postgrescluster)
+	if err != nil {
+		return err
+	}
+
+	// If snapshots are disabled, delete any existing snapshots and return early.
+	if postgrescluster.Spec.Backups.Snapshots == nil {
+		return r.deleteSnapshots(ctx, postgrescluster, snapshots)
+	}
+
+	// If we got here, then snapshots are enabled (the feature gate is enabled and the
+	// cluster has a Spec.Backups.Snapshots section defined).
+
+	// Check snapshots for errors; if present, create an event. If there are
+	// multiple snapshots with errors, create an event for the latest error and
+	// delete any older snapshots with errors.
+	snapshotWithLatestError := getSnapshotWithLatestError(snapshots)
+	if snapshotWithLatestError != nil {
+		r.Recorder.Event(postgrescluster, corev1.EventTypeWarning, "VolumeSnapshotError",
+			*snapshotWithLatestError.Status.Error.Message)
+		for _, snapshot := range snapshots.Items {
+			if snapshot.Status != nil && snapshot.Status.Error != nil &&
+				snapshot.Status.Error.Time.Before(snapshotWithLatestError.Status.Error.Time) {
+				err = r.deleteControlled(ctx, postgrescluster, &snapshot)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	// Get the PVC's backup job completion annotation. If it does not exist, there has
+	// not been a successful restore yet, so return early.
+	pvcUpdateTimeStamp, pvcAnnotationExists := pvc.GetAnnotations()[naming.PGBackRestBackupJobCompletion]
+	if !pvcAnnotationExists {
+		return err
+	}
+
+	// Check to see if a snapshot exists for the latest backup that has been restored into
+	// the dedicated pvc.
+	var snapshotForPvcUpdateIdx int
+	snapshotFoundForPvcUpdate := false
+	for idx, snapshot := range snapshots.Items {
+		if snapshot.GetAnnotations()[naming.PGBackRestBackupJobCompletion] == pvcUpdateTimeStamp {
+			snapshotForPvcUpdateIdx = idx
+			snapshotFoundForPvcUpdate = true
+		}
+	}
+
+	// If a snapshot exists for the latest backup that has been restored into the dedicated pvc
+	// and the snapshot is Ready, delete all other snapshots.
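+	// (ReadyToUse is reported by the CSI external snapshot controller once the
+	// snapshot contents can actually be used to provision a volume.)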
+	if snapshotFoundForPvcUpdate && snapshots.Items[snapshotForPvcUpdateIdx].Status.ReadyToUse != nil &&
+		*snapshots.Items[snapshotForPvcUpdateIdx].Status.ReadyToUse {
+		for idx, snapshot := range snapshots.Items {
+			if idx != snapshotForPvcUpdateIdx {
+				err = r.deleteControlled(ctx, postgrescluster, &snapshot)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	// If a snapshot for the latest backup/restore does not exist, create a snapshot.
+	if !snapshotFoundForPvcUpdate {
+		var snapshot *volumesnapshotv1.VolumeSnapshot
+		snapshot, err = r.generateSnapshotOfDedicatedSnapshotVolume(postgrescluster, pvc)
+		if err == nil {
+			err = errors.WithStack(r.apply(ctx, snapshot))
+		}
+	}
+
+	return err
+}
+
+// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={get}
+// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,delete,patch}
+
+// reconcileDedicatedSnapshotVolume reconciles the PersistentVolumeClaim that holds a
+// copy of the pgdata and is dedicated for clean snapshots of the database. It creates
+// and manages the volume as well as the restore jobs that bring the volume data forward
+// after a successful backup.
+func (r *Reconciler) reconcileDedicatedSnapshotVolume(
+	ctx context.Context, cluster *v1beta1.PostgresCluster,
+	clusterVolumes []corev1.PersistentVolumeClaim,
+) (*corev1.PersistentVolumeClaim, error) {
+
+	// If the VolumeSnapshots feature gate is disabled, do nothing and return early.
+	if !feature.Enabled(ctx, feature.VolumeSnapshots) {
+		return nil, nil
+	}
+
+	// Set appropriate labels for the dedicated snapshot volume.
+	labelMap := map[string]string{
+		naming.LabelCluster: cluster.Name,
+		naming.LabelRole:    naming.RoleSnapshot,
+		naming.LabelData:    naming.DataPostgres,
+	}
+
+	// If the volume already exists, use the existing name. Otherwise, generate a name.
+	var pvc *corev1.PersistentVolumeClaim
+	existingPVCName, err := getPGPVCName(labelMap, clusterVolumes)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	if existingPVCName != "" {
+		pvc = &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{
+			Namespace: cluster.GetNamespace(),
+			Name:      existingPVCName,
+		}}
+	} else {
+		pvc = &corev1.PersistentVolumeClaim{ObjectMeta: naming.ClusterDedicatedSnapshotVolume(cluster)}
+	}
+	pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim"))
+
+	// If snapshots are disabled, delete the PVC if it exists and return early.
+	// Check the client cache first using Get.
+	if cluster.Spec.Backups.Snapshots == nil {
+		key := client.ObjectKeyFromObject(pvc)
+		err := errors.WithStack(r.Client.Get(ctx, key, pvc))
+		if err == nil {
+			err = errors.WithStack(r.deleteControlled(ctx, cluster, pvc))
+		}
+		return nil, client.IgnoreNotFound(err)
+	}
+
+	// If we've got this far, snapshots are enabled, so we should create/update/get
+	// the dedicated snapshot volume.
+	pvc, err = r.createDedicatedSnapshotVolume(ctx, cluster, labelMap, pvc)
+	if err != nil {
+		return pvc, err
+	}
+
+	// Determine if we need to run a restore job, based on the most recent backup
+	// and an annotation on the PVC.
+
+	// Find the most recently completed backup job.
+	backupJob, err := r.getLatestCompleteBackupJob(ctx, cluster)
+	if err != nil {
+		return pvc, err
+	}
+
+	// Return early if no complete backup job is found.
+	if backupJob == nil {
+		return pvc, nil
+	}
+
+	// Return early if the PVC is annotated with a timestamp newer than or equal to the
+	// latest backup job's completion time.
+	// If the annotation value cannot be parsed, we want to proceed with a restore.
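+	// The annotation value is written by this controller in RFC 3339 format,
+	// e.g. "2024-01-02T15:04:05Z" (illustrative value).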
+	pvcAnnotationTimestampString := pvc.GetAnnotations()[naming.PGBackRestBackupJobCompletion]
+	if pvcAnnotationTime, err := time.Parse(time.RFC3339, pvcAnnotationTimestampString); err == nil {
+		if backupJob.Status.CompletionTime.Compare(pvcAnnotationTime) <= 0 {
+			return pvc, nil
+		}
+	}
+
+	// If we've made it here, the PVC has not been restored with the latest backup.
+	// Find the dedicated snapshot volume restore job if it exists. Since we delete
+	// successful restores after we annotate the PVC and stop making restore jobs
+	// if a failed DSV restore job exists, there should only ever be one DSV restore
+	// job in existence at a time.
+	// TODO(snapshots): Should this function throw an error or something if multiple
+	// DSV restores somehow exist?
+	restoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster)
+	if err != nil {
+		return pvc, err
+	}
+
+	// If we don't find a restore job, we run one.
+	if restoreJob == nil {
+		err = r.dedicatedSnapshotVolumeRestore(ctx, cluster, pvc, backupJob)
+		return pvc, err
+	}
+
+	// If we've made it here, we have found a restore job. If the restore job was
+	// successful, set/update the annotation on the PVC and delete the restore job.
+	if restoreJob.Status.Succeeded == 1 {
+		if pvc.GetAnnotations() == nil {
+			pvc.Annotations = map[string]string{}
+		}
+		pvc.Annotations[naming.PGBackRestBackupJobCompletion] = restoreJob.GetAnnotations()[naming.PGBackRestBackupJobCompletion]
+		annotations := fmt.Sprintf(`{"metadata":{"annotations":{"%s": "%s"}}}`,
+			naming.PGBackRestBackupJobCompletion, pvc.Annotations[naming.PGBackRestBackupJobCompletion])
+
+		patch := client.RawPatch(client.Merge.Type(), []byte(annotations))
+		err = r.handlePersistentVolumeClaimError(cluster,
+			errors.WithStack(r.patch(ctx, pvc, patch)))
+
+		if err != nil {
+			return pvc, err
+		}
+
+		err = r.Client.Delete(ctx, restoreJob, client.PropagationPolicy(metav1.DeletePropagationBackground))
+		return pvc, errors.WithStack(err)
+	}
+
+	// If the restore job failed, create a warning event.
+	if restoreJob.Status.Failed == 1 {
+		r.Recorder.Event(cluster, corev1.EventTypeWarning,
+			"DedicatedSnapshotVolumeRestoreJobError", "restore job failed, check the logs")
+		return pvc, nil
+	}
+
+	// If we made it here, the restore job is still running and we should do nothing.
+	return pvc, err
+}
+
+// createDedicatedSnapshotVolume creates/updates/gets the dedicated snapshot volume.
+// It expects that the volume name and GVK have already been set on the PVC that is passed in.
+func (r *Reconciler) createDedicatedSnapshotVolume(ctx context.Context,
+	cluster *v1beta1.PostgresCluster, labelMap map[string]string,
+	pvc *corev1.PersistentVolumeClaim,
+) (*corev1.PersistentVolumeClaim, error) {
+	var err error
+
+	// An InstanceSet must be chosen to scale resources for the dedicated snapshot volume.
+	// TODO: We've chosen the first InstanceSet for the time being, but might want to consider
+	// making the choice configurable.
+	instanceSpec := cluster.Spec.InstanceSets[0]
+
+	pvc.Annotations = naming.Merge(
+		cluster.Spec.Metadata.GetAnnotationsOrNil(),
+		instanceSpec.Metadata.GetAnnotationsOrNil())
+
+	pvc.Labels = naming.Merge(
+		cluster.Spec.Metadata.GetLabelsOrNil(),
+		instanceSpec.Metadata.GetLabelsOrNil(),
+		labelMap,
+	)
+
+	err = errors.WithStack(r.setControllerReference(cluster, pvc))
+	if err != nil {
+		return pvc, err
+	}
+
+	pvc.Spec = instanceSpec.DataVolumeClaimSpec
+
+	// Set the snapshot volume to the same size as the pgdata volume. The size should scale with auto-grow.
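+	// setVolumeSize clamps the request to any configured limit and, when the
+	// AutoGrowVolumes feature gate is enabled, grows the request toward the
+	// desired size recorded in the cluster status (see TestSetVolumeSize).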
+	r.setVolumeSize(ctx, cluster, pvc, instanceSpec.Name)
+
+	// Clear any set limit before applying the PVC. This is needed to allow the limit
+	// value to change later.
+	pvc.Spec.Resources.Limits = nil
+
+	err = r.handlePersistentVolumeClaimError(cluster,
+		errors.WithStack(r.apply(ctx, pvc)))
+	if err != nil {
+		return pvc, err
+	}
+
+	return pvc, err
+}
+
+// dedicatedSnapshotVolumeRestore creates a Job that performs a restore into the dedicated
+// snapshot volume.
+// This function is very similar to reconcileRestoreJob, but specifically tailored to the
+// dedicated snapshot volume.
+func (r *Reconciler) dedicatedSnapshotVolumeRestore(ctx context.Context,
+	cluster *v1beta1.PostgresCluster, dedicatedSnapshotVolume *corev1.PersistentVolumeClaim,
+	backupJob *batchv1.Job,
+) error {
+
+	pgdata := postgres.DataDirectory(cluster)
+	repoName := backupJob.GetLabels()[naming.LabelPGBackRestRepo]
+
+	opts := []string{
+		"--stanza=" + pgbackrest.DefaultStanzaName,
+		"--pg1-path=" + pgdata,
+		"--repo=" + regexRepoIndex.FindString(repoName),
+		"--delta",
+	}
+
+	cmd := pgbackrest.DedicatedSnapshotVolumeRestoreCommand(pgdata, strings.Join(opts, " "))
+
+	// Create the volume resources required for the Postgres data directory.
+	dataVolumeMount := postgres.DataVolumeMount()
+	dataVolume := corev1.Volume{
+		Name: dataVolumeMount.Name,
+		VolumeSource: corev1.VolumeSource{
+			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+				ClaimName: dedicatedSnapshotVolume.GetName(),
+			},
+		},
+	}
+	volumes := []corev1.Volume{dataVolume}
+	volumeMounts := []corev1.VolumeMount{dataVolumeMount}
+
+	_, configHash, err := pgbackrest.CalculateConfigHashes(cluster)
+	if err != nil {
+		return err
+	}
+
+	// A DataSource is required to avoid a nil pointer exception.
+	fakeDataSource := &v1beta1.PostgresClusterDataSource{RepoName: ""}
+
+	restoreJob := &batchv1.Job{}
+	instanceName := cluster.Status.StartupInstance
+
+	if err := r.generateRestoreJobIntent(cluster, configHash, instanceName, cmd,
+		volumeMounts, volumes, fakeDataSource, restoreJob); err != nil {
+		return errors.WithStack(err)
+	}
+
+	// Attempt the restore exactly once. If the restore job fails, we prompt the user to investigate.
+	restoreJob.Spec.BackoffLimit = initialize.Int32(0)
+	restoreJob.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever
+
+	// Add pgBackRest configs to template.
+	pgbackrest.AddConfigToRestorePod(cluster, cluster, &restoreJob.Spec.Template.Spec)
+
+	// Add nss_wrapper init container and add nss_wrapper env vars to the pgbackrest restore container.
+	addNSSWrapper(
+		config.PGBackRestContainerImage(cluster),
+		cluster.Spec.ImagePullPolicy,
+		&restoreJob.Spec.Template)
+
+	addTMPEmptyDir(&restoreJob.Spec.Template)
+
+	restoreJob.Annotations[naming.PGBackRestBackupJobCompletion] = backupJob.Status.CompletionTime.Format(time.RFC3339)
+	return errors.WithStack(r.apply(ctx, restoreJob))
+}
+
+// generateSnapshotOfDedicatedSnapshotVolume will generate a VolumeSnapshot of
+// the dedicated snapshot PersistentVolumeClaim and annotate it with the backup
+// job completion timestamp carried on the volume's annotations.
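+// That annotation is how reconcileVolumeSnapshots later matches a snapshot to
+// the backup whose data it captured.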
+func (r *Reconciler) generateSnapshotOfDedicatedSnapshotVolume( + postgrescluster *v1beta1.PostgresCluster, + dedicatedSnapshotVolume *corev1.PersistentVolumeClaim, +) (*volumesnapshotv1.VolumeSnapshot, error) { + + snapshot, err := r.generateVolumeSnapshot(postgrescluster, *dedicatedSnapshotVolume, + postgrescluster.Spec.Backups.Snapshots.VolumeSnapshotClassName) + if err == nil { + if snapshot.Annotations == nil { + snapshot.Annotations = map[string]string{} + } + snapshot.Annotations[naming.PGBackRestBackupJobCompletion] = dedicatedSnapshotVolume.GetAnnotations()[naming.PGBackRestBackupJobCompletion] + } + + return snapshot, err +} + +// generateVolumeSnapshot generates a VolumeSnapshot that will use the supplied +// PersistentVolumeClaim and VolumeSnapshotClassName and will set the provided +// PostgresCluster as the owner. +func (r *Reconciler) generateVolumeSnapshot(postgrescluster *v1beta1.PostgresCluster, + pvc corev1.PersistentVolumeClaim, volumeSnapshotClassName string, +) (*volumesnapshotv1.VolumeSnapshot, error) { + + snapshot := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: naming.ClusterVolumeSnapshot(postgrescluster), + } + snapshot.Spec.Source.PersistentVolumeClaimName = &pvc.Name + snapshot.Spec.VolumeSnapshotClassName = &volumeSnapshotClassName + + snapshot.Annotations = postgrescluster.Spec.Metadata.GetAnnotationsOrNil() + snapshot.Labels = naming.Merge(postgrescluster.Spec.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: postgrescluster.Name, + }) + + err := errors.WithStack(r.setControllerReference(postgrescluster, snapshot)) + + return snapshot, err +} + +// getDedicatedSnapshotVolumeRestoreJob finds a dedicated snapshot volume (DSV) +// restore job if one exists. Since we delete successful restore jobs and stop +// creating new restore jobs when one fails, there should only ever be one DSV +// restore job present at a time. If a DSV restore cannot be found, we return nil. 
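+// DSV restore jobs are distinguished from other restore jobs by the
+// naming.PGBackRestBackupJobCompletion annotation that dedicatedSnapshotVolumeRestore
+// sets on them.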
+func (r *Reconciler) getDedicatedSnapshotVolumeRestoreJob(ctx context.Context, + postgrescluster *v1beta1.PostgresCluster) (*batchv1.Job, error) { + + // Get all restore jobs for this cluster + jobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(postgrescluster.Name)) + if err == nil { + err = errors.WithStack( + r.Client.List(ctx, jobs, + client.InNamespace(postgrescluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + } + if err != nil { + return nil, err + } + + // Get restore job that has PGBackRestBackupJobCompletion annotation + for _, job := range jobs.Items { + _, annotationExists := job.GetAnnotations()[naming.PGBackRestBackupJobCompletion] + if annotationExists { + return &job, nil + } + } + + return nil, nil +} + +// getLatestCompleteBackupJob finds the most recently completed +// backup job for a cluster +func (r *Reconciler) getLatestCompleteBackupJob(ctx context.Context, + postgrescluster *v1beta1.PostgresCluster) (*batchv1.Job, error) { + + // Get all backup jobs for this cluster + jobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterBackupJobs(postgrescluster.Name)) + if err == nil { + err = errors.WithStack( + r.Client.List(ctx, jobs, + client.InNamespace(postgrescluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + } + if err != nil { + return nil, err + } + + zeroTime := metav1.NewTime(time.Time{}) + latestCompleteBackupJob := batchv1.Job{ + Status: batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &zeroTime, + }, + } + for _, job := range jobs.Items { + if job.Status.Succeeded > 0 && + latestCompleteBackupJob.Status.CompletionTime.Before(job.Status.CompletionTime) { + latestCompleteBackupJob = job + } + } + + if latestCompleteBackupJob.Status.CompletionTime.Equal(&zeroTime) { + return nil, nil + } + + return &latestCompleteBackupJob, nil +} + +// getSnapshotWithLatestError takes a VolumeSnapshotList and returns a pointer to the +// snapshot that has most recently had an error. If no snapshot errors exist +// then it returns nil. +func getSnapshotWithLatestError(snapshots *volumesnapshotv1.VolumeSnapshotList) *volumesnapshotv1.VolumeSnapshot { + zeroTime := metav1.NewTime(time.Time{}) + snapshotWithLatestError := volumesnapshotv1.VolumeSnapshot{ + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &zeroTime, + }, + }, + } + for _, snapshot := range snapshots.Items { + if snapshot.Status != nil && snapshot.Status.Error != nil && + snapshotWithLatestError.Status.Error.Time.Before(snapshot.Status.Error.Time) { + snapshotWithLatestError = snapshot + } + } + + if snapshotWithLatestError.Status.Error.Time.Equal(&zeroTime) { + return nil + } + + return &snapshotWithLatestError +} + +// getSnapshotsForCluster gets all the VolumeSnapshots for a given postgrescluster. +func (r *Reconciler) getSnapshotsForCluster(ctx context.Context, cluster *v1beta1.PostgresCluster) ( + *volumesnapshotv1.VolumeSnapshotList, error) { + + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + if err != nil { + return nil, err + } + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + + return snapshots, err +} + +// getLatestReadySnapshot takes a VolumeSnapshotList and returns the latest ready VolumeSnapshot. 
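+// If no ready snapshot exists, it returns nil.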
+func getLatestReadySnapshot(snapshots *volumesnapshotv1.VolumeSnapshotList) *volumesnapshotv1.VolumeSnapshot {
+	zeroTime := metav1.NewTime(time.Time{})
+	latestReadySnapshot := volumesnapshotv1.VolumeSnapshot{
+		Status: &volumesnapshotv1.VolumeSnapshotStatus{
+			CreationTime: &zeroTime,
+		},
+	}
+	for _, snapshot := range snapshots.Items {
+		if snapshot.Status != nil && snapshot.Status.ReadyToUse != nil && *snapshot.Status.ReadyToUse &&
+			latestReadySnapshot.Status.CreationTime.Before(snapshot.Status.CreationTime) {
+			latestReadySnapshot = snapshot
+		}
+	}
+
+	if latestReadySnapshot.Status.CreationTime.Equal(&zeroTime) {
+		return nil
+	}
+
+	return &latestReadySnapshot
+}
+
+// deleteSnapshots takes a postgrescluster and a snapshot list and deletes all snapshots
+// in the list that are controlled by the provided postgrescluster.
+func (r *Reconciler) deleteSnapshots(ctx context.Context,
+	postgrescluster *v1beta1.PostgresCluster, snapshots *volumesnapshotv1.VolumeSnapshotList) error {
+
+	for i := range snapshots.Items {
+		err := errors.WithStack(client.IgnoreNotFound(
+			r.deleteControlled(ctx, postgrescluster, &snapshots.Items[i])))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// clusterUsingTablespaces determines if the TablespaceVolumes feature is enabled and the given
+// cluster has tablespace volumes in place.
+func clusterUsingTablespaces(ctx context.Context, postgrescluster *v1beta1.PostgresCluster) bool {
+	for _, instanceSet := range postgrescluster.Spec.InstanceSets {
+		if len(instanceSet.TablespaceVolumes) > 0 {
+			return feature.Enabled(ctx, feature.TablespaceVolumes)
+		}
+	}
+	return false
+}
diff --git a/internal/controller/postgrescluster/snapshots_test.go b/internal/controller/postgrescluster/snapshots_test.go
new file mode 100644
index 0000000000..4c3d987ecd
--- /dev/null
+++ b/internal/controller/postgrescluster/snapshots_test.go
@@ -0,0 +1,1476 @@
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+// +// SPDX-License-Identifier: Apache-2.0 + +package postgrescluster + +import ( + "context" + "testing" + "time" + + "github.com/pkg/errors" + "gotest.tools/v3/assert" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/events" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" + + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" +) + +func TestReconcileVolumeSnapshots(t *testing.T) { + ctx := context.Background() + cfg, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) + assert.NilError(t, err) + + recorder := events.NewRecorder(t, runtime.Scheme) + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + DiscoveryClient: discoveryClient, + Recorder: recorder, + } + ns := setupNamespace(t, cc) + + // Enable snapshots feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.VolumeSnapshots: true, + })) + ctx = feature.NewContext(ctx, gate) + + t.Run("SnapshotsDisabledDeleteSnapshots", func(t *testing.T) { + // Create cluster (without snapshots spec) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create a snapshot + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + volumeSnapshotClassName := "my-snapshotclass" + snapshot, err := r.generateVolumeSnapshot(cluster, *pvc, volumeSnapshotClassName) + assert.NilError(t, err) + err = errors.WithStack(r.apply(ctx, snapshot)) + assert.NilError(t, err) + + // Get all snapshots for this cluster and assert 1 exists + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 1) + + // Reconcile snapshots + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Get all snapshots for this cluster and assert 0 exist + assert.NilError(t, err) + snapshots = &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 0) + }) + + t.Run("SnapshotsEnabledTablespacesEnabled", func(t *testing.T) { + // Enable both tablespaces and 
snapshots feature gates + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + feature.VolumeSnapshots: true, + })) + ctx := feature.NewContext(ctx, gate) + + // Create a cluster with snapshots and tablespaces enabled + volumeSnapshotClassName := "my-snapshotclass" + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: volumeSnapshotClassName, + } + cluster.Spec.InstanceSets[0].TablespaceVolumes = []v1beta1.TablespaceVolume{{ + Name: "volume-1", + }} + + // Create pvc for reconcile + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + + // Reconcile + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Assert warning event was created and has expected attributes + if assert.Check(t, len(recorder.Events) > 0) { + assert.Equal(t, recorder.Events[0].Type, "Warning") + assert.Equal(t, recorder.Events[0].Regarding.Kind, "PostgresCluster") + assert.Equal(t, recorder.Events[0].Regarding.Name, "hippo") + assert.Equal(t, recorder.Events[0].Reason, "IncompatibleFeatures") + assert.Assert(t, cmp.Contains(recorder.Events[0].Note, "VolumeSnapshots not currently compatible with TablespaceVolumes")) + } + }) + + t.Run("SnapshotsEnabledNoPvcAnnotation", func(t *testing.T) { + // Create a volume snapshot class + volumeSnapshotClassName := "my-snapshotclass" + volumeSnapshotClass := &volumesnapshotv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeSnapshotClassName, + }, + DeletionPolicy: "Delete", + } + assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + + // Create a cluster with snapshots enabled + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: volumeSnapshotClassName, + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create pvc for reconcile + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + + // Reconcile + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Assert no snapshots exist + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 0) + }) + + t.Run("SnapshotsEnabledReadySnapshotsExist", func(t *testing.T) { + // Create a volume snapshot class + volumeSnapshotClassName := "my-snapshotclass" + volumeSnapshotClass := &volumesnapshotv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeSnapshotClassName, + }, + DeletionPolicy: "Delete", + } + assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + + // Create a cluster with snapshots enabled + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: 
volumeSnapshotClassName, + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create pvc with annotation + pvcName := initialize.String("dedicated-snapshot-volume") + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: *pvcName, + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "backup-timestamp", + }, + }, + } + + // Create snapshot with annotation matching the pvc annotation + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "first-snapshot", + Namespace: ns.Name, + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "backup-timestamp", + }, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err := errors.WithStack(r.setControllerReference(cluster, snapshot1)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot1) + assert.NilError(t, err) + + // Update snapshot status + truePtr := initialize.Bool(true) + snapshot1.Status = &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: truePtr, + } + err = r.Client.Status().Update(ctx, snapshot1) + assert.NilError(t, err) + + // Create second snapshot with different annotation value + snapshot2 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "second-snapshot", + Namespace: ns.Name, + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "older-backup-timestamp", + }, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err = errors.WithStack(r.setControllerReference(cluster, snapshot2)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot2) + assert.NilError(t, err) + + // Update second snapshot's status + snapshot2.Status = &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: truePtr, + } + err = r.Client.Status().Update(ctx, snapshot2) + assert.NilError(t, err) + + // Reconcile + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Assert first snapshot exists and second snapshot was deleted + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 1) + assert.Equal(t, snapshots.Items[0].Name, "first-snapshot") + + // Cleanup + err = r.deleteControlled(ctx, cluster, snapshot1) + assert.NilError(t, err) + }) + + t.Run("SnapshotsEnabledCreateSnapshot", func(t *testing.T) { + // Create a volume snapshot class + volumeSnapshotClassName := "my-snapshotclass" + volumeSnapshotClass := &volumesnapshotv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeSnapshotClassName, + }, + DeletionPolicy: "Delete", + } + assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) + 
t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + + // Create a cluster with snapshots enabled + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: volumeSnapshotClassName, + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create pvc with annotation + pvcName := initialize.String("dedicated-snapshot-volume") + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: *pvcName, + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "another-backup-timestamp", + }, + }, + } + + // Reconcile + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Assert that a snapshot was created + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 1) + assert.Equal(t, snapshots.Items[0].Annotations[naming.PGBackRestBackupJobCompletion], + "another-backup-timestamp") + }) +} + +func TestReconcileDedicatedSnapshotVolume(t *testing.T) { + ctx := context.Background() + cfg, cc := setupKubernetes(t) + discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) + assert.NilError(t, err) + + recorder := events.NewRecorder(t, runtime.Scheme) + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + DiscoveryClient: discoveryClient, + Recorder: recorder, + } + + // Enable snapshots feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.VolumeSnapshots: true, + })) + ctx = feature.NewContext(ctx, gate) + + t.Run("SnapshotsDisabledDeletePvc", func(t *testing.T) { + // Create cluster without snapshots spec + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create a dedicated snapshot volume + pvc := &corev1.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + Kind: "PersistentVolumeClaim", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleSnapshot, + naming.LabelData: naming.DataPostgres, + }, + }, + Spec: testVolumeClaimSpec(), + } + err = errors.WithStack(r.setControllerReference(cluster, pvc)) + assert.NilError(t, err) + err = r.apply(ctx, pvc) + assert.NilError(t, err) + + // Assert that the pvc was created + selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + pvcs := &corev1.PersistentVolumeClaimList{} + err = errors.WithStack( + r.Client.List(ctx, pvcs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectPvcs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(pvcs.Items), 1) + + // Create volumes for reconcile + clusterVolumes := []corev1.PersistentVolumeClaim{*pvc} + + // Reconcile + returned, err := 
r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Check(t, returned == nil) + + // Assert that the pvc has been deleted or marked for deletion + key, fetched := client.ObjectKeyFromObject(pvc), &corev1.PersistentVolumeClaim{} + if err := r.Client.Get(ctx, key, fetched); err == nil { + assert.Assert(t, fetched.DeletionTimestamp != nil, "expected deleted") + } else { + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %v", err) + } + }) + + t.Run("SnapshotsEnabledCreatePvcNoBackupNoRestore", func(t *testing.T) { + // Create cluster with snapshots enabled + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshotclass", + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create volumes for reconcile + clusterVolumes := []corev1.PersistentVolumeClaim{} + + // Reconcile + pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Assert(t, pvc != nil) + + // Assert pvc was created + selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + pvcs := &corev1.PersistentVolumeClaimList{} + err = errors.WithStack( + r.Client.List(ctx, pvcs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectPvcs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(pvcs.Items), 1) + }) + + t.Run("SnapshotsEnabledBackupExistsCreateRestore", func(t *testing.T) { + // Create cluster with snapshots enabled + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshotclass", + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create successful backup job + backupJob := testBackupJob(cluster) + err = errors.WithStack(r.setControllerReference(cluster, backupJob)) + assert.NilError(t, err) + err = r.apply(ctx, backupJob) + assert.NilError(t, err) + + currentTime := metav1.Now() + backupJob.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: ¤tTime, + } + err = r.Client.Status().Update(ctx, backupJob) + assert.NilError(t, err) + + // Create instance set and volumes for reconcile + sts := &appsv1.StatefulSet{} + generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) + clusterVolumes := []corev1.PersistentVolumeClaim{} + + // Reconcile + pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Assert(t, pvc != nil) + + // Assert restore job with annotation was created + restoreJobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) + assert.NilError(t, err) + err = errors.WithStack( + r.Client.List(ctx, restoreJobs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(restoreJobs.Items), 1) + assert.Assert(t, restoreJobs.Items[0].Annotations[naming.PGBackRestBackupJobCompletion] != "") + }) + + t.Run("SnapshotsEnabledSuccessfulRestoreExists", func(t *testing.T) { + // 
Create cluster with snapshots enabled + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshotclass", + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create times for jobs + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + + // Create successful backup job + backupJob := testBackupJob(cluster) + err = errors.WithStack(r.setControllerReference(cluster, backupJob)) + assert.NilError(t, err) + err = r.apply(ctx, backupJob) + assert.NilError(t, err) + + backupJob.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &earlierTime, + } + err = r.Client.Status().Update(ctx, backupJob) + assert.NilError(t, err) + + // Create successful restore job + restoreJob := testRestoreJob(cluster) + restoreJob.Annotations = map[string]string{ + naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), + } + err = errors.WithStack(r.setControllerReference(cluster, restoreJob)) + assert.NilError(t, err) + err = r.apply(ctx, restoreJob) + assert.NilError(t, err) + + restoreJob.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: ¤tTime, + } + err = r.Client.Status().Update(ctx, restoreJob) + assert.NilError(t, err) + + // Create instance set and volumes for reconcile + sts := &appsv1.StatefulSet{} + generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) + clusterVolumes := []corev1.PersistentVolumeClaim{} + + // Reconcile + pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Assert(t, pvc != nil) + + // Assert restore job was deleted + restoreJobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) + assert.NilError(t, err) + err = errors.WithStack( + r.Client.List(ctx, restoreJobs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(restoreJobs.Items), 0) + + // Assert pvc was annotated + assert.Equal(t, pvc.GetAnnotations()[naming.PGBackRestBackupJobCompletion], backupJob.Status.CompletionTime.Format(time.RFC3339)) + }) + + t.Run("SnapshotsEnabledFailedRestoreExists", func(t *testing.T) { + // Create cluster with snapshots enabled + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshotclass", + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create times for jobs + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + + // Create successful backup job + backupJob := testBackupJob(cluster) + err = errors.WithStack(r.setControllerReference(cluster, backupJob)) + assert.NilError(t, err) + err = r.apply(ctx, backupJob) + assert.NilError(t, err) + + backupJob.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &earlierTime, + } + err = r.Client.Status().Update(ctx, backupJob) + assert.NilError(t, err) + + // Create failed restore job + restoreJob := testRestoreJob(cluster) + restoreJob.Annotations = 
map[string]string{ + naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), + } + err = errors.WithStack(r.setControllerReference(cluster, restoreJob)) + assert.NilError(t, err) + err = r.apply(ctx, restoreJob) + assert.NilError(t, err) + + restoreJob.Status = batchv1.JobStatus{ + Succeeded: 0, + Failed: 1, + CompletionTime: ¤tTime, + } + err = r.Client.Status().Update(ctx, restoreJob) + assert.NilError(t, err) + + // Setup instances and volumes for reconcile + sts := &appsv1.StatefulSet{} + generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) + clusterVolumes := []corev1.PersistentVolumeClaim{} + + // Reconcile + pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Assert(t, pvc != nil) + + // Assert warning event was created and has expected attributes + if assert.Check(t, len(recorder.Events) > 0) { + assert.Equal(t, recorder.Events[0].Type, "Warning") + assert.Equal(t, recorder.Events[0].Regarding.Kind, "PostgresCluster") + assert.Equal(t, recorder.Events[0].Regarding.Name, "hippo") + assert.Equal(t, recorder.Events[0].Reason, "DedicatedSnapshotVolumeRestoreJobError") + assert.Assert(t, cmp.Contains(recorder.Events[0].Note, "restore job failed, check the logs")) + } + }) +} + +func TestCreateDedicatedSnapshotVolume(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + + labelMap := map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleSnapshot, + naming.LabelData: naming.DataPostgres, + } + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.ClusterDedicatedSnapshotVolume(cluster)} + pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim")) + + pvc, err := r.createDedicatedSnapshotVolume(ctx, cluster, labelMap, pvc) + assert.NilError(t, err) + assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) + assert.Equal(t, pvc.Spec.Resources.Requests[corev1.ResourceStorage], resource.MustParse("1Gi")) +} + +func TestDedicatedSnapshotVolumeRestore(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + + sts := &appsv1.StatefulSet{} + generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) + currentTime := metav1.Now() + backupJob := testBackupJob(cluster) + backupJob.Status.CompletionTime = ¤tTime + + err := r.dedicatedSnapshotVolumeRestore(ctx, cluster, pvc, backupJob) + assert.NilError(t, err) + + // Assert a restore job was created that has the correct annotation + jobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) + assert.NilError(t, err) + err = errors.WithStack( + r.Client.List(ctx, jobs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(jobs.Items), 1) + assert.Equal(t, 
jobs.Items[0].Annotations[naming.PGBackRestBackupJobCompletion], + backupJob.Status.CompletionTime.Format(time.RFC3339)) +} + +func TestGenerateSnapshotOfDedicatedSnapshotVolume(t *testing.T) { + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshot", + } + + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "backup-completion-timestamp", + }, + Name: "dedicated-snapshot-volume", + }, + } + + snapshot, err := r.generateSnapshotOfDedicatedSnapshotVolume(cluster, pvc) + assert.NilError(t, err) + assert.Equal(t, snapshot.GetAnnotations()[naming.PGBackRestBackupJobCompletion], + "backup-completion-timestamp") +} + +func TestGenerateVolumeSnapshot(t *testing.T) { + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + volumeSnapshotClassName := "my-snapshot" + + snapshot, err := r.generateVolumeSnapshot(cluster, *pvc, volumeSnapshotClassName) + assert.NilError(t, err) + assert.Equal(t, *snapshot.Spec.VolumeSnapshotClassName, "my-snapshot") + assert.Equal(t, *snapshot.Spec.Source.PersistentVolumeClaimName, "dedicated-snapshot-volume") + assert.Equal(t, snapshot.Labels[naming.LabelCluster], "hippo") + assert.Equal(t, snapshot.ObjectMeta.OwnerReferences[0].Name, "hippo") +} + +func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + + t.Run("NoRestoreJobs", func(t *testing.T) { + dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, dsvRestoreJob == nil) + }) + + t.Run("NoDsvRestoreJobs", func(t *testing.T) { + job1 := testRestoreJob(cluster) + job1.Namespace = ns.Name + + err := r.apply(ctx, job1) + assert.NilError(t, err) + + dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, dsvRestoreJob == nil) + }) + + t.Run("DsvRestoreJobExists", func(t *testing.T) { + job2 := testRestoreJob(cluster) + job2.Name = "restore-job-2" + job2.Namespace = ns.Name + job2.Annotations = map[string]string{ + naming.PGBackRestBackupJobCompletion: "backup-timestamp", + } + + err := r.apply(ctx, job2) + assert.NilError(t, err) + + job3 := testRestoreJob(cluster) + job3.Name = "restore-job-3" + job3.Namespace = ns.Name + + err = r.apply(ctx, job3) + assert.NilError(t, err) + + dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, dsvRestoreJob != nil) + assert.Equal(t, dsvRestoreJob.Name, "restore-job-2") + }) +} + +func TestGetLatestCompleteBackupJob(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + // require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: 
client.FieldOwner(t.Name()), + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + + t.Run("NoJobs", func(t *testing.T) { + latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, latestCompleteBackupJob == nil) + }) + + t.Run("NoCompleteJobs", func(t *testing.T) { + job1 := testBackupJob(cluster) + job1.Namespace = ns.Name + + err := r.apply(ctx, job1) + assert.NilError(t, err) + + latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, latestCompleteBackupJob == nil) + }) + + t.Run("OneCompleteBackupJob", func(t *testing.T) { + currentTime := metav1.Now() + + job1 := testBackupJob(cluster) + job1.Namespace = ns.Name + + err := r.apply(ctx, job1) + assert.NilError(t, err) + + job2 := testBackupJob(cluster) + job2.Namespace = ns.Name + job2.Name = "backup-job-2" + + err = r.apply(ctx, job2) + assert.NilError(t, err) + + // Get job1 and update Status. + err = r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1) + assert.NilError(t, err) + + job1.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &currentTime, + } + err = r.Client.Status().Update(ctx, job1) + assert.NilError(t, err) + + latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, latestCompleteBackupJob.Name == "backup-job-1") + }) + + t.Run("TwoCompleteBackupJobs", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + assert.Check(t, earlierTime.Before(&currentTime)) + + job1 := testBackupJob(cluster) + job1.Namespace = ns.Name + + err := r.apply(ctx, job1) + assert.NilError(t, err) + + job2 := testBackupJob(cluster) + job2.Namespace = ns.Name + job2.Name = "backup-job-2" + + err = r.apply(ctx, job2) + assert.NilError(t, err) + + // Get job1 and update Status. + err = r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1) + assert.NilError(t, err) + + job1.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &currentTime, + } + err = r.Client.Status().Update(ctx, job1) + assert.NilError(t, err) + + // Get job2 and update Status.
+ err = r.Client.Get(ctx, client.ObjectKeyFromObject(job2), job2) + assert.NilError(t, err) + + job2.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &earlierTime, + } + err = r.Client.Status().Update(ctx, job2) + assert.NilError(t, err) + + latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, latestCompleteBackupJob.Name == "backup-job-1") + }) +} + +func TestGetSnapshotWithLatestError(t *testing.T) { + t.Run("NoSnapshots", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{} + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Check(t, snapshotWithLatestError == nil) + }) + + t.Run("NoSnapshotsWithStatus", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + {}, + {}, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Check(t, snapshotWithLatestError == nil) + }) + + t.Run("NoSnapshotsWithErrors", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(true), + }, + }, + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + }, + }, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Check(t, snapshotWithLatestError == nil) + }) + + t.Run("OneSnapshotWithError", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "good-snapshot", + UID: "the-uid-123", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &currentTime, + ReadyToUse: initialize.Bool(true), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "bad-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &earlierTime, + }, + }, + }, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Equal(t, snapshotWithLatestError.ObjectMeta.Name, "bad-snapshot") + }) + + t.Run("TwoSnapshotsWithErrors", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "first-bad-snapshot", + UID: "the-uid-123", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &earlierTime, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "second-bad-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &currentTime, + }, + }, + }, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Equal(t, snapshotWithLatestError.ObjectMeta.Name, "second-bad-snapshot") + }) +} + +func TestGetSnapshotsForCluster(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + }
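// The TestGetSnapshotWithLatestError cases above pin down a selection rule:
// skip snapshots that have no Status or no Status.Error, and return the one
// whose Error.Time is most recent. A minimal sketch of that rule, assuming
// the volumesnapshotv1 import already used in this file; the function name
// and shape are illustrative, not the actual implementation:
//
//	func latestErroredSnapshot(list *volumesnapshotv1.VolumeSnapshotList) *volumesnapshotv1.VolumeSnapshot {
//		var latest *volumesnapshotv1.VolumeSnapshot
//		for i := range list.Items {
//			s := &list.Items[i]
//			// Ignore snapshots that never reported an error.
//			if s.Status == nil || s.Status.Error == nil || s.Status.Error.Time == nil {
//				continue
//			}
//			if latest == nil || latest.Status.Error.Time.Before(s.Status.Error.Time) {
//				latest = s
//			}
//		}
//		return latest
//	}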
+ ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + + t.Run("NoSnapshots", func(t *testing.T) { + snapshots, err := r.getSnapshotsForCluster(ctx, cluster) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 0) + }) + + t.Run("NoSnapshotsForCluster", func(t *testing.T) { + snapshot := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "rhino", + }, + }, + } + snapshot.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") + snapshot.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") + err := r.apply(ctx, snapshot) + assert.NilError(t, err) + + snapshots, err := r.getSnapshotsForCluster(ctx, cluster) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 0) + }) + + t.Run("OneSnapshotForCluster", func(t *testing.T) { + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "rhino", + }, + }, + } + snapshot1.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") + snapshot1.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") + err := r.apply(ctx, snapshot1) + assert.NilError(t, err) + + snapshot2 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "another-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + } + snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") + snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") + err = r.apply(ctx, snapshot2) + assert.NilError(t, err) + + snapshots, err := r.getSnapshotsForCluster(ctx, cluster) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 1) + assert.Equal(t, snapshots.Items[0].Name, "another-snapshot") + }) + + t.Run("TwoSnapshotsForCluster", func(t *testing.T) { + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + } + snapshot1.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") + snapshot1.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") + err := r.apply(ctx, snapshot1) + assert.NilError(t, err) + + snapshot2 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "another-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + } + snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") + snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") + err = r.apply(ctx, snapshot2) + assert.NilError(t, err) + + snapshots, err 
:= r.getSnapshotsForCluster(ctx, cluster) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 2) + }) +} + +func TestGetLatestReadySnapshot(t *testing.T) { + t.Run("NoSnapshots", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{} + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Assert(t, latestReadySnapshot == nil) + }) + + t.Run("NoSnapshotsWithStatus", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + {}, + {}, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Assert(t, latestReadySnapshot == nil) + }) + + t.Run("NoReadySnapshots", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + }, + }, + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + }, + }, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Assert(t, latestReadySnapshot == nil) + }) + + t.Run("OneReadySnapshot", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "good-snapshot", + UID: "the-uid-123", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &earlierTime, + ReadyToUse: initialize.Bool(true), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "bad-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &currentTime, + ReadyToUse: initialize.Bool(false), + }, + }, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Equal(t, latestReadySnapshot.ObjectMeta.Name, "good-snapshot") + }) + + t.Run("TwoReadySnapshots", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "first-good-snapshot", + UID: "the-uid-123", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &earlierTime, + ReadyToUse: initialize.Bool(true), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "second-good-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &currentTime, + ReadyToUse: initialize.Bool(true), + }, + }, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Equal(t, latestReadySnapshot.ObjectMeta.Name, "second-good-snapshot") + }) +} + +func TestDeleteSnapshots(t *testing.T) { + ctx := context.Background() + cfg, cc := setupKubernetes(t) + discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) + assert.NilError(t, err) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + DiscoveryClient: discoveryClient, + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + assert.NilError(t, r.Client.Create(ctx, cluster)) + + rhinoCluster := testCluster() + rhinoCluster.Name = "rhino" + rhinoCluster.Namespace = ns.Name + rhinoCluster.ObjectMeta.UID = "the-uid-456" + assert.NilError(t, r.Client.Create(ctx, rhinoCluster)) + + t.Cleanup(func() { + assert.Check(t,
r.Client.Delete(ctx, cluster)) + assert.Check(t, r.Client.Delete(ctx, rhinoCluster)) + }) + + t.Run("NoSnapshots", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{} + err := r.deleteSnapshots(ctx, cluster, snapshotList) + assert.NilError(t, err) + }) + + t.Run("NoSnapshotsControlledByHippo", func(t *testing.T) { + pvcName := initialize.String("dedicated-snapshot-volume") + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "first-snapshot", + Namespace: ns.Name, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err := errors.WithStack(r.setControllerReference(rhinoCluster, snapshot1)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot1) + assert.NilError(t, err) + + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + *snapshot1, + }, + } + err = r.deleteSnapshots(ctx, cluster, snapshotList) + assert.NilError(t, err) + existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, existingSnapshots, + client.InNamespace(ns.Namespace), + )) + assert.NilError(t, err) + assert.Equal(t, len(existingSnapshots.Items), 1) + }) + + t.Run("OneSnapshotControlledByHippo", func(t *testing.T) { + pvcName := initialize.String("dedicated-snapshot-volume") + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "first-snapshot", + Namespace: ns.Name, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err := errors.WithStack(r.setControllerReference(rhinoCluster, snapshot1)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot1) + assert.NilError(t, err) + + snapshot2 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "second-snapshot", + Namespace: ns.Name, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err = errors.WithStack(r.setControllerReference(cluster, snapshot2)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot2) + assert.NilError(t, err) + + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + *snapshot1, *snapshot2, + }, + } + err = r.deleteSnapshots(ctx, cluster, snapshotList) + assert.NilError(t, err) + existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, existingSnapshots, + client.InNamespace(ns.Namespace), + )) + assert.NilError(t, err) + assert.Equal(t, len(existingSnapshots.Items), 1) + assert.Equal(t, existingSnapshots.Items[0].Name, "first-snapshot") + }) +} + +func TestClusterUsingTablespaces(t *testing.T) { + ctx := context.Background() + cluster := testCluster() + + t.Run("NoVolumesFeatureEnabled", func(t *testing.T) { + // Enable Tablespaces feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx := 
feature.NewContext(ctx, gate) + + assert.Assert(t, !clusterUsingTablespaces(ctx, cluster)) + }) + + t.Run("VolumesInPlaceFeatureDisabled", func(t *testing.T) { + cluster.Spec.InstanceSets[0].TablespaceVolumes = []v1beta1.TablespaceVolume{{ + Name: "volume-1", + }} + + assert.Assert(t, !clusterUsingTablespaces(ctx, cluster)) + }) + + t.Run("VolumesInPlaceAndFeatureEnabled", func(t *testing.T) { + // Enable Tablespaces feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx := feature.NewContext(ctx, gate) + + assert.Assert(t, clusterUsingTablespaces(ctx, cluster)) + }) +} diff --git a/internal/controller/postgrescluster/suite_test.go b/internal/controller/postgrescluster/suite_test.go index a2d59effa9..2a0e3d76ec 100644 --- a/internal/controller/postgrescluster/suite_test.go +++ b/internal/controller/postgrescluster/suite_test.go @@ -1,34 +1,20 @@ -//go:build envtest -// +build envtest - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" + "os" "path/filepath" + "strings" "testing" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/discovery" - "k8s.io/client-go/kubernetes/scheme" // Google Kubernetes Engine / Google Cloud Platform authentication provider _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" @@ -38,14 +24,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) var suite struct { Client client.Client Config *rest.Config - Scheme *runtime.Scheme Environment *envtest.Environment ServerVersion *version.Version @@ -60,23 +45,28 @@ func TestAPIs(t *testing.T) { } var _ = BeforeSuite(func() { + if os.Getenv("KUBEBUILDER_ASSETS") == "" && !strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + Skip("skipping") + } + logging.SetLogSink(logging.Logrus(GinkgoWriter, "test", 1, 1)) log.SetLogger(logging.FromContext(context.Background())) By("bootstrapping test environment") suite.Environment = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "..", "config", "crd", "bases"), + filepath.Join("..", "..", "..", "hack", "tools", "external-snapshotter", "client", "config", "crd"), + }, } - suite.Scheme = runtime.NewScheme() - Expect(scheme.AddToScheme(suite.Scheme)).To(Succeed()) - Expect(v1beta1.AddToScheme(suite.Scheme)).To(Succeed()) - _, err := suite.Environment.Start() Expect(err).ToNot(HaveOccurred()) + DeferCleanup(suite.Environment.Stop) + suite.Config = suite.Environment.Config - suite.Client, err = client.New(suite.Config, client.Options{Scheme: suite.Scheme}) + suite.Client, err = client.New(suite.Config, client.Options{Scheme: runtime.Scheme}) Expect(err).ToNot(HaveOccurred()) dc, err := discovery.NewDiscoveryClientForConfig(suite.Config) @@ -90,6 +80,5 @@ var _ = BeforeSuite(func() { }) var _ = AfterSuite(func() { - By("tearing down the test environment") - Expect(suite.Environment.Stop()).To(Succeed()) + }) diff --git a/internal/controller/postgrescluster/topology.go b/internal/controller/postgrescluster/topology.go index 6ac89e76ce..58778be907 100644 --- a/internal/controller/postgrescluster/topology.go +++ b/internal/controller/postgrescluster/topology.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster diff --git a/internal/controller/postgrescluster/topology_test.go b/internal/controller/postgrescluster/topology_test.go index 2b4a92c68f..40c8c0dd7f 100644 --- a/internal/controller/postgrescluster/topology_test.go +++ b/internal/controller/postgrescluster/topology_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -20,6 +9,8 @@ import ( "gotest.tools/v3/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" ) func TestDefaultTopologySpreadConstraints(t *testing.T) { @@ -31,7 +22,7 @@ func TestDefaultTopologySpreadConstraints(t *testing.T) { }) // Entire selector, hostname, zone, and ScheduleAnyway. - assert.Assert(t, marshalMatches(constraints, ` + assert.Assert(t, cmp.MarshalMatches(constraints, ` - labelSelector: matchExpressions: - key: k1 diff --git a/internal/controller/postgrescluster/util.go b/internal/controller/postgrescluster/util.go index 3985fae466..25120ab574 100644 --- a/internal/controller/postgrescluster/util.go +++ b/internal/controller/postgrescluster/util.go @@ -1,19 +1,8 @@ -package postgrescluster - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package postgrescluster import ( "fmt" @@ -24,7 +13,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/rand" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" @@ -297,22 +285,3 @@ func safeHash32(content func(w io.Writer) error) (string, error) { } return rand.SafeEncodeString(fmt.Sprint(hash.Sum32())), nil } - -// updateReconcileResult creates a new Result based on the new and existing results provided to it. -// This includes setting "Requeue" to true in the Result if set to true in the new Result but not -// in the existing Result, while also updating RequeueAfter if the RequeueAfter value for the new -// result is less the the RequeueAfter value for the existing Result. 
-func updateReconcileResult(currResult, newResult reconcile.Result) reconcile.Result { - - if newResult.Requeue { - currResult.Requeue = true - } - - if newResult.RequeueAfter != 0 { - if currResult.RequeueAfter == 0 || newResult.RequeueAfter < currResult.RequeueAfter { - currResult.RequeueAfter = newResult.RequeueAfter - } - } - - return currResult -} diff --git a/internal/controller/postgrescluster/util_test.go b/internal/controller/postgrescluster/util_test.go index 2e3c938a9a..51a32f1e85 100644 --- a/internal/controller/postgrescluster/util_test.go +++ b/internal/controller/postgrescluster/util_test.go @@ -1,32 +1,18 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "errors" - "fmt" "io" "testing" - "time" "gotest.tools/v3/assert" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/testing/cmp" @@ -53,142 +39,6 @@ func TestSafeHash32(t *testing.T) { assert.Equal(t, same, stuff, "expected deterministic hash") } -func TestUpdateReconcileResult(t *testing.T) { - - testCases := []struct { - currResult reconcile.Result - newResult reconcile.Result - requeueExpected bool - expectedRequeueAfter time.Duration - }{{ - currResult: reconcile.Result{}, - newResult: reconcile.Result{}, - requeueExpected: false, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{Requeue: false}, - newResult: reconcile.Result{Requeue: true}, - requeueExpected: true, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{Requeue: true}, - newResult: reconcile.Result{Requeue: false}, - requeueExpected: true, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{Requeue: true}, - newResult: reconcile.Result{Requeue: true}, - requeueExpected: true, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{Requeue: false}, - newResult: reconcile.Result{Requeue: false}, - requeueExpected: false, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{}, - newResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - requeueExpected: false, - expectedRequeueAfter: 5 * time.Second, - }, { - currResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - newResult: reconcile.Result{}, - requeueExpected: false, - expectedRequeueAfter: 5 * time.Second, - }, { - currResult: reconcile.Result{RequeueAfter: 1 * time.Second}, - newResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - requeueExpected: false, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - newResult: reconcile.Result{RequeueAfter: 1 * time.Second}, - requeueExpected: false, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{RequeueAfter: 5 * 
time.Second}, - newResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - requeueExpected: false, - expectedRequeueAfter: 5 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: true, RequeueAfter: 1 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: true, RequeueAfter: 1 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: false, RequeueAfter: 1 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: true, RequeueAfter: 1 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: false, RequeueAfter: 5 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: false, RequeueAfter: 5 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: false, RequeueAfter: 1 * time.Second, - }, - requeueExpected: false, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: false, RequeueAfter: 1 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: false, RequeueAfter: 5 * time.Second, - }, - requeueExpected: false, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{}, - newResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 5 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - newResult: reconcile.Result{}, - requeueExpected: true, - expectedRequeueAfter: 5 * time.Second, - }} - - for _, tc := range testCases { - t.Run(fmt.Sprintf("curr: %v, new: %v", tc.currResult, tc.newResult), func(t *testing.T) { - result := updateReconcileResult(tc.currResult, tc.newResult) - assert.Assert(t, result.Requeue == tc.requeueExpected) - assert.Assert(t, result.RequeueAfter == tc.expectedRequeueAfter) - }) - } -} - func TestAddDevSHM(t *testing.T) { testCases := []struct { diff --git a/internal/controller/postgrescluster/volumes.go b/internal/controller/postgrescluster/volumes.go index 2cfdd575fe..e40710d4ff 100644 --- a/internal/controller/postgrescluster/volumes.go +++ b/internal/controller/postgrescluster/volumes.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -130,6 +119,15 @@ func (r *Reconciler) observePersistentVolumeClaims( resizing.LastTransitionTime = minNotZero( resizing.LastTransitionTime, condition.LastTransitionTime) } + + case + // The "ModifyingVolume" and "ModifyVolumeError" conditions occur + // when the attribute class of a PVC is changing. These attributes + // do not affect the size of a volume, so there's nothing to do. + // See the "VolumeAttributesClass" feature gate. + // - https://git.k8s.io/enhancements/keps/sig-storage/3751-volume-attributes-class + corev1.PersistentVolumeClaimVolumeModifyingVolume, + corev1.PersistentVolumeClaimVolumeModifyVolumeError: } } } @@ -501,10 +499,9 @@ func (r *Reconciler) reconcileMovePGDataDir(ctx context.Context, }, } // set the priority class name, if it exists - if len(cluster.Spec.InstanceSets) > 0 && - cluster.Spec.InstanceSets[0].PriorityClassName != nil { + if len(cluster.Spec.InstanceSets) > 0 { jobSpec.Template.Spec.PriorityClassName = - *cluster.Spec.InstanceSets[0].PriorityClassName + initialize.FromPointer(cluster.Spec.InstanceSets[0].PriorityClassName) } moveDirJob.Spec = *jobSpec @@ -619,10 +616,9 @@ func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, }, } // set the priority class name, if it exists - if len(cluster.Spec.InstanceSets) > 0 && - cluster.Spec.InstanceSets[0].PriorityClassName != nil { + if len(cluster.Spec.InstanceSets) > 0 { jobSpec.Template.Spec.PriorityClassName = - *cluster.Spec.InstanceSets[0].PriorityClassName + initialize.FromPointer(cluster.Spec.InstanceSets[0].PriorityClassName) } moveDirJob.Spec = *jobSpec @@ -742,9 +738,7 @@ func (r *Reconciler) reconcileMoveRepoDir(ctx context.Context, } // set the priority class name, if it exists if repoHost := cluster.Spec.Backups.PGBackRest.RepoHost; repoHost != nil { - if repoHost.PriorityClassName != nil { - jobSpec.Template.Spec.PriorityClassName = *repoHost.PriorityClassName - } + jobSpec.Template.Spec.PriorityClassName = initialize.FromPointer(repoHost.PriorityClassName) } moveDirJob.Spec = *jobSpec diff --git a/internal/controller/postgrescluster/volumes_test.go b/internal/controller/postgrescluster/volumes_test.go index 05f90044ca..96eef5f916 100644 --- a/internal/controller/postgrescluster/volumes_test.go +++ b/internal/controller/postgrescluster/volumes_test.go @@ -1,20 +1,6 @@ -//go:build envtest -// +build envtest - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
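// The initialize.FromPointer calls above replace explicit nil checks: the
// helper returns the type's zero value for a nil pointer, so an unset
// PriorityClassName becomes the empty string. A hedged sketch of that
// behavior (the generic shape is an assumption; see internal/initialize for
// the real helper):
//
//	func FromPointer[T any](p *T) T {
//		if p == nil {
//			var zero T // zero value, e.g. "" for string
//			return zero
//		}
//		return *p
//	}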
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -44,10 +30,7 @@ import ( ) func TestHandlePersistentVolumeClaimError(t *testing.T) { - scheme, err := runtime.CreatePostgresOperatorScheme() - assert.NilError(t, err) - - recorder := events.NewRecorder(t, scheme) + recorder := events.NewRecorder(t, runtime.Scheme) reconciler := &Reconciler{ Recorder: recorder, } @@ -287,7 +270,7 @@ func TestGetPVCNameMethods(t *testing.T) { AccessModes: []corev1.PersistentVolumeAccessMode{ "ReadWriteMany", }, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -412,7 +395,7 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) { DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -428,7 +411,7 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) { VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource. Quantity{ corev1.ResourceStorage: resource. @@ -482,7 +465,7 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) { assert.Assert(t, len(clusterVolumes) == 1) // observe again, but allow time for the change to be observed - err = wait.Poll(time.Second/2, Scale(time.Second*15), func() (bool, error) { + err = wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*15), false, func(ctx context.Context) (bool, error) { clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster) return len(clusterVolumes) == 1, err }) @@ -548,7 +531,7 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) { assert.Assert(t, len(clusterVolumes) == 2) // observe again, but allow time for the change to be observed - err = wait.Poll(time.Second/2, Scale(time.Second*15), func() (bool, error) { + err = wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*15), false, func(ctx context.Context) (bool, error) { clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster) return len(clusterVolumes) == 2, err }) @@ -616,7 +599,7 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) { assert.Assert(t, len(clusterVolumes) == 3) // observe again, but allow time for the change to be observed - err = wait.Poll(time.Second/2, Scale(time.Second*15), func() (bool, error) { + err = wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*15), false, func(ctx context.Context) (bool, error) { clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster) return len(clusterVolumes) == 3, err }) @@ -695,7 +678,7 @@ func TestReconcileMoveDirectories(t *testing.T) { DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -719,7 +702,7 @@ func TestReconcileMoveDirectories(t *testing.T) { VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + 
Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource. Quantity{ corev1.ResourceStorage: resource. @@ -782,6 +765,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: @@ -804,7 +789,7 @@ volumes: claimName: testpgdata ` - assert.Assert(t, marshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) + assert.Assert(t, cmp.MarshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) } } @@ -840,6 +825,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: @@ -862,7 +849,7 @@ volumes: claimName: testwal ` - assert.Assert(t, marshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) + assert.Assert(t, cmp.MarshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) } } @@ -900,6 +887,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: @@ -921,7 +910,7 @@ volumes: persistentVolumeClaim: claimName: testrepo ` - assert.Assert(t, marshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) + assert.Assert(t, cmp.MarshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) } } diff --git a/internal/controller/postgrescluster/watches.go b/internal/controller/postgrescluster/watches.go index 698ecd8181..0b5ba5fa87 100644 --- a/internal/controller/postgrescluster/watches.go +++ b/internal/controller/postgrescluster/watches.go @@ -1,21 +1,12 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( + "context" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" @@ -29,7 +20,7 @@ import ( // watchPods returns a handler.EventHandler for Pods. func (*Reconciler) watchPods() handler.Funcs { return handler.Funcs{ - UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { labels := e.ObjectNew.GetLabels() cluster := labels[naming.LabelCluster] @@ -57,6 +48,29 @@ func (*Reconciler) watchPods() handler.Funcs { }}) return } + + // Queue an event to start applying changes if the PostgreSQL instance + // now has the "master" role. 
+ if len(cluster) != 0 && + !patroni.PodIsPrimary(e.ObjectOld) && + patroni.PodIsPrimary(e.ObjectNew) { + q.Add(reconcile.Request{NamespacedName: client.ObjectKey{ + Namespace: e.ObjectNew.GetNamespace(), + Name: cluster, + }}) + return + } + + oldAnnotations := e.ObjectOld.GetAnnotations() + newAnnotations := e.ObjectNew.GetAnnotations() + // If the suggested-pgdata-pvc-size annotation is added or changes, reconcile. + if len(cluster) != 0 && oldAnnotations["suggested-pgdata-pvc-size"] != newAnnotations["suggested-pgdata-pvc-size"] { + q.Add(reconcile.Request{NamespacedName: client.ObjectKey{ + Namespace: e.ObjectNew.GetNamespace(), + Name: cluster, + }}) + return + } }, } } diff --git a/internal/controller/postgrescluster/watches_test.go b/internal/controller/postgrescluster/watches_test.go index ccb8d9fadb..fdea498862 100644 --- a/internal/controller/postgrescluster/watches_test.go +++ b/internal/controller/postgrescluster/watches_test.go @@ -1,21 +1,11 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( + "context" "testing" "gotest.tools/v3/assert" @@ -28,21 +18,22 @@ import ( ) func TestWatchPodsUpdate(t *testing.T) { - queue := controllertest.Queue{Interface: workqueue.New()} + ctx := context.Background() + queue := &controllertest.Queue{Interface: workqueue.New()} reconciler := &Reconciler{} update := reconciler.watchPods().UpdateFunc assert.Assert(t, update != nil) // No metadata; no reconcile. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: &corev1.Pod{}, ObjectNew: &corev1.Pod{}, }, queue) assert.Equal(t, queue.Len(), 0) // Cluster label, but nothing else; no reconcile. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ @@ -61,7 +52,7 @@ func TestWatchPodsUpdate(t *testing.T) { assert.Equal(t, queue.Len(), 0) // Cluster standby leader changed; one reconcile by label. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -108,7 +99,7 @@ func TestWatchPodsUpdate(t *testing.T) { } // Newly pending; one reconcile by label. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: base.DeepCopy(), ObjectNew: pending.DeepCopy(), }, queue) @@ -119,7 +110,7 @@ func TestWatchPodsUpdate(t *testing.T) { queue.Done(item) // Still pending; one reconcile by label. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: pending.DeepCopy(), ObjectNew: pending.DeepCopy(), }, queue) @@ -130,7 +121,7 @@ func TestWatchPodsUpdate(t *testing.T) { queue.Done(item) // No longer pending; one reconcile by label. 
- update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: pending.DeepCopy(), ObjectNew: base.DeepCopy(), }, queue) @@ -140,4 +131,54 @@ func TestWatchPodsUpdate(t *testing.T) { assert.Equal(t, item, expected) queue.Done(item) }) + + // Pod annotation with arbitrary key; no reconcile. + update(ctx, event.UpdateEvent{ + ObjectOld: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "clortho": "vince", + }, + Labels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + }, + }, + }, + ObjectNew: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "clortho": "vin", + }, + Labels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + }, + }, + }, + }, queue) + assert.Equal(t, queue.Len(), 0) + + // Pod annotation with suggested-pgdata-pvc-size; reconcile. + update(ctx, event.UpdateEvent{ + ObjectOld: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "suggested-pgdata-pvc-size": "5000Mi", + }, + Labels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + }, + }, + }, + ObjectNew: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "suggested-pgdata-pvc-size": "8000Mi", + }, + Labels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + }, + }, + }, + }, queue) + assert.Equal(t, queue.Len(), 1) } diff --git a/internal/controller/runtime/client.go b/internal/controller/runtime/client.go index 7f416a96fd..4cc05c9835 100644 --- a/internal/controller/runtime/client.go +++ b/internal/controller/runtime/client.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package runtime @@ -23,10 +12,7 @@ import ( // Types that implement single methods of the [client.Reader] interface. type ( - // NOTE: The signature of [client.Client.Get] changes in [sigs.k8s.io/controller-runtime@v0.13.0]. - // - https://github.com/kubernetes-sigs/controller-runtime/releases/tag/v0.13.0 - - ClientGet func(context.Context, client.ObjectKey, client.Object) error + ClientGet func(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error ClientList func(context.Context, client.ObjectList, ...client.ListOption) error ) @@ -73,8 +59,8 @@ func (fn ClientDeleteAll) DeleteAllOf(ctx context.Context, obj client.Object, op return fn(ctx, obj, opts...) } -func (fn ClientGet) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { - return fn(ctx, key, obj) +func (fn ClientGet) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + return fn(ctx, key, obj, opts...) 
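// Illustrative only: these single-method adapters let a test stub one client
// call without building a full fake client. The variable name and the
// apierrors/schema imports below are assumptions, not part of this change:
//
//	var notFound ClientGet = func(
//		ctx context.Context, key client.ObjectKey, obj client.Object, _ ...client.GetOption,
//	) error {
//		// Always report the object as missing.
//		return apierrors.NewNotFound(schema.GroupResource{}, key.Name)
//	}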
} func (fn ClientList) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { diff --git a/internal/controller/postgrescluster/pod_client.go b/internal/controller/runtime/pod_client.go similarity index 61% rename from internal/controller/postgrescluster/pod_client.go rename to internal/controller/runtime/pod_client.go index 9e82df9b76..e842601aa7 100644 --- a/internal/controller/postgrescluster/pod_client.go +++ b/internal/controller/runtime/pod_client.go @@ -1,21 +1,11 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package postgrescluster +package runtime import ( + "context" "io" corev1 "k8s.io/api/core/v1" @@ -29,23 +19,27 @@ import ( // podExecutor runs command on container in pod in namespace. Non-nil streams // (stdin, stdout, and stderr) are attached the to the remote process. type podExecutor func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error func newPodClient(config *rest.Config) (rest.Interface, error) { codecs := serializer.NewCodecFactory(scheme.Scheme) gvk, _ := apiutil.GVKForObject(&corev1.Pod{}, scheme.Scheme) - return apiutil.RESTClientForGVK(gvk, false, config, codecs) + httpClient, err := rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + return apiutil.RESTClientForGVK(gvk, false, config, codecs, httpClient) } // +kubebuilder:rbac:groups="",resources="pods/exec",verbs={create} -func newPodExecutor(config *rest.Config) (podExecutor, error) { +func NewPodExecutor(config *rest.Config) (podExecutor, error) { client, err := newPodClient(config) return func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { request := client.Post(). @@ -62,7 +56,7 @@ func newPodExecutor(config *rest.Config) (podExecutor, error) { exec, err := remotecommand.NewSPDYExecutor(config, "POST", request.URL()) if err == nil { - err = exec.Stream(remotecommand.StreamOptions{ + err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ Stdin: stdin, Stdout: stdout, Stderr: stderr, diff --git a/internal/controller/runtime/reconcile.go b/internal/controller/runtime/reconcile.go new file mode 100644 index 0000000000..a2196d1626 --- /dev/null +++ b/internal/controller/runtime/reconcile.go @@ -0,0 +1,69 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "time" + + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ErrorWithBackoff returns a Result and error that indicate a non-nil err +// should be logged and measured and its [reconcile.Request] should be retried +// later. When err is nil, nothing is logged and the Request is not retried. 
+// When err unwraps to [reconcile.TerminalError], the Request is not retried. +func ErrorWithBackoff(err error) (reconcile.Result, error) { + // Result should be zero to avoid warning messages. + return reconcile.Result{}, err + + // When error is not nil and not a TerminalError, the controller-runtime Controller + // puts [reconcile.Request] back into the workqueue using AddRateLimited. + // - https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.4/pkg/internal/controller/controller.go#L317 + // - https://pkg.go.dev/k8s.io/client-go/util/workqueue#RateLimitingInterface +} + +// ErrorWithoutBackoff returns a Result and error that indicate a non-nil err +// should be logged and measured without retrying its [reconcile.Request]. +// When err is nil, nothing is logged and the Request is not retried. +func ErrorWithoutBackoff(err error) (reconcile.Result, error) { + if err != nil { + err = reconcile.TerminalError(err) + } + + // Result should be zero to avoid warning messages. + return reconcile.Result{}, err + + // When error is a TerminalError, the controller-runtime Controller increments + // a counter rather than interact with the workqueue. + // - https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.4/pkg/internal/controller/controller.go#L314 +} + +// RequeueWithBackoff returns a Result that indicates a [reconcile.Request] +// should be retried later. +func RequeueWithBackoff() reconcile.Result { + return reconcile.Result{Requeue: true} + + // When [reconcile.Result].Requeue is true, the controller-runtime Controller + // puts [reconcile.Request] back into the workqueue using AddRateLimited. + // - https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.4/pkg/internal/controller/controller.go#L334 + // - https://pkg.go.dev/k8s.io/client-go/util/workqueue#RateLimitingInterface +} + +// RequeueWithoutBackoff returns a Result that indicates a [reconcile.Request] +// should be retried on or before delay. +func RequeueWithoutBackoff(delay time.Duration) reconcile.Result { + // RequeueAfter must be positive to not backoff. + if delay <= 0 { + delay = time.Nanosecond + } + + // RequeueAfter implies Requeue, but set both to remove any ambiguity. + return reconcile.Result{Requeue: true, RequeueAfter: delay} + + // When [reconcile.Result].RequeueAfter is positive, the controller-runtime Controller + // puts [reconcile.Request] back into the workqueue using AddAfter. + // - https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.4/pkg/internal/controller/controller.go#L325 + // - https://pkg.go.dev/k8s.io/client-go/util/workqueue#DelayingInterface +} diff --git a/internal/controller/runtime/reconcile_test.go b/internal/controller/runtime/reconcile_test.go new file mode 100644 index 0000000000..925b3cf47d --- /dev/null +++ b/internal/controller/runtime/reconcile_test.go @@ -0,0 +1,57 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
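// A hedged usage sketch for the helpers above; the reconciler type and its
// lookup method are hypothetical, only ErrorWithBackoff, ErrorWithoutBackoff,
// and RequeueWithoutBackoff come from this package:
//
//	func (r *ExampleReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
//		cluster, err := r.lookup(ctx, req.NamespacedName) // hypothetical helper
//		if err != nil {
//			// Transient problem: log, measure, and retry with rate-limited backoff.
//			return ErrorWithBackoff(err)
//		}
//		if cluster == nil {
//			// Retrying will not help: log and measure without requeueing.
//			return ErrorWithoutBackoff(errors.New("cluster not found"))
//		}
//		// Healthy: poll again in a minute with no backoff.
//		return RequeueWithoutBackoff(time.Minute), nil
//	}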
+// +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "errors" + "testing" + "time" + + "gotest.tools/v3/assert" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func TestErrorWithBackoff(t *testing.T) { + result, err := ErrorWithBackoff(nil) + assert.Assert(t, result.IsZero()) + assert.NilError(t, err) + + expected := errors.New("doot") + result, err = ErrorWithBackoff(expected) + assert.Assert(t, result.IsZero()) + assert.Equal(t, err, expected) +} + +func TestErrorWithoutBackoff(t *testing.T) { + result, err := ErrorWithoutBackoff(nil) + assert.Assert(t, result.IsZero()) + assert.NilError(t, err) + + expected := errors.New("doot") + result, err = ErrorWithoutBackoff(expected) + assert.Assert(t, result.IsZero()) + assert.Assert(t, errors.Is(err, reconcile.TerminalError(nil))) + assert.Equal(t, errors.Unwrap(err), expected) +} + +func TestRequeueWithBackoff(t *testing.T) { + result := RequeueWithBackoff() + assert.Assert(t, result.Requeue) + assert.Assert(t, result.RequeueAfter == 0) +} + +func TestRequeueWithoutBackoff(t *testing.T) { + result := RequeueWithoutBackoff(0) + assert.Assert(t, result.Requeue) + assert.Assert(t, result.RequeueAfter > 0) + + result = RequeueWithoutBackoff(-1) + assert.Assert(t, result.Requeue) + assert.Assert(t, result.RequeueAfter > 0) + + result = RequeueWithoutBackoff(time.Minute) + assert.Assert(t, result.Requeue) + assert.Equal(t, result.RequeueAfter, time.Minute) +} diff --git a/internal/controller/runtime/runtime.go b/internal/controller/runtime/runtime.go index 60f3ab33fa..34bfeabf61 100644 --- a/internal/controller/runtime/runtime.go +++ b/internal/controller/runtime/runtime.go @@ -1,87 +1,76 @@ -package runtime - -/* -Copyright 2021 - 2023 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +package runtime import ( - "time" + "context" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" + + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ) -// default refresh interval in minutes -var refreshInterval = 60 * time.Minute +type ( + CacheConfig = cache.Config + Manager = manager.Manager + Options = manager.Options +) -// CreateRuntimeManager creates a new controller runtime manager for the PostgreSQL Operator. The -// manager returned is configured specifically for the PostgreSQL Operator, and includes any -// controllers that will be responsible for managing PostgreSQL clusters using the -// 'postgrescluster' custom resource. 
Additionally, the manager will only watch for resources in -// the namespace specified, with an empty string resulting in the manager watching all namespaces. -func CreateRuntimeManager(namespace string, config *rest.Config, - disableMetrics bool) (manager.Manager, error) { +// Scheme associates standard Kubernetes API objects and PGO API objects with Go structs. +var Scheme *runtime.Scheme = runtime.NewScheme() - pgoScheme, err := CreatePostgresOperatorScheme() - if err != nil { - return nil, err +func init() { + if err := scheme.AddToScheme(Scheme); err != nil { + panic(err) } - - options := manager.Options{ - Namespace: namespace, // if empty then watching all namespaces - SyncPeriod: &refreshInterval, - Scheme: pgoScheme, + if err := v1beta1.AddToScheme(Scheme); err != nil { + panic(err) } - if disableMetrics { - options.HealthProbeBindAddress = "0" - options.MetricsBindAddress = "0" + if err := volumesnapshotv1.AddToScheme(Scheme); err != nil { + panic(err) } - - // create controller runtime manager - mgr, err := manager.New(config, options) - if err != nil { - return nil, err - } - - return mgr, nil } -// GetConfig creates a *rest.Config for talking to a Kubernetes API server. +// GetConfig returns a Kubernetes client configuration from KUBECONFIG or the +// service account Kubernetes gives to pods. func GetConfig() (*rest.Config, error) { return config.GetConfig() } -// CreatePostgresOperatorScheme creates a scheme containing the resource types required by the -// PostgreSQL Operator. This includes any custom resource types specific to the PostgreSQL -// Operator, as well as any standard Kubernetes resource types. -func CreatePostgresOperatorScheme() (*runtime.Scheme, error) { +// NewManager returns a Manager that interacts with the Kubernetes API of config. +// When config is nil, it reads from KUBECONFIG or the local service account. +// When options.Scheme is nil, it uses the Scheme from this package. +func NewManager(config *rest.Config, options manager.Options) (manager.Manager, error) { + var m manager.Manager + var err error - // create a new scheme specifically for this manager - pgoScheme := runtime.NewScheme() + if config == nil { + config, err = GetConfig() + } - // add standard resource types to the scheme - if err := scheme.AddToScheme(pgoScheme); err != nil { - return nil, err + if options.Scheme == nil { + options.Scheme = Scheme } - // add custom resource types to the default scheme - if err := v1beta1.AddToScheme(pgoScheme); err != nil { - return nil, err + if err == nil { + m, err = manager.New(config, options) } - return pgoScheme, nil + return m, err } + +// SetLogger assigns the default Logger used by [sigs.k8s.io/controller-runtime]. +func SetLogger(logger logging.Logger) { log.SetLogger(logger) } + +// SignalHandler returns a Context that is canceled on SIGINT or SIGTERM. +func SignalHandler() context.Context { return signals.SetupSignalHandler() } diff --git a/internal/controller/runtime/ticker.go b/internal/controller/runtime/ticker.go index e4415a79f1..830179eafc 100644 --- a/internal/controller/runtime/ticker.go +++ b/internal/controller/runtime/ticker.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package runtime @@ -22,24 +11,26 @@ import ( "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" ) type ticker struct { time.Duration event.GenericEvent + Handler handler.EventHandler Immediate bool } // NewTicker returns a Source that emits e every d. -func NewTicker(d time.Duration, e event.GenericEvent) source.Source { - return &ticker{Duration: d, GenericEvent: e} +func NewTicker(d time.Duration, e event.GenericEvent, + h handler.EventHandler) source.Source { + return &ticker{Duration: d, GenericEvent: e, Handler: h} } // NewTickerImmediate returns a Source that emits e at start and every d. -func NewTickerImmediate(d time.Duration, e event.GenericEvent) source.Source { - return &ticker{Duration: d, GenericEvent: e, Immediate: true} +func NewTickerImmediate(d time.Duration, e event.GenericEvent, + h handler.EventHandler) source.Source { + return &ticker{Duration: d, GenericEvent: e, Handler: h, Immediate: true} } func (t ticker) String() string { return "every " + t.Duration.String() } @@ -47,20 +38,14 @@ func (t ticker) String() string { return "every " + t.Duration.String() } // Start is called by controller-runtime Controller and returns quickly. // It cleans up when ctx is cancelled. func (t ticker) Start( - ctx context.Context, h handler.EventHandler, - q workqueue.RateLimitingInterface, p ...predicate.Predicate, + ctx context.Context, q workqueue.RateLimitingInterface, ) error { ticker := time.NewTicker(t.Duration) // Pass t.GenericEvent to h when it is not filtered out by p. // - https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/source/internal#EventHandler emit := func() { - for _, pp := range p { - if !pp.Generic(t.GenericEvent) { - return - } - } - h.Generic(t.GenericEvent, q) + t.Handler.Generic(ctx, t.GenericEvent, q) } if t.Immediate { diff --git a/internal/controller/runtime/ticker_test.go b/internal/controller/runtime/ticker_test.go index 163e5dacdd..49cecd79d7 100644 --- a/internal/controller/runtime/ticker_test.go +++ b/internal/controller/runtime/ticker_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package runtime @@ -25,7 +14,6 @@ import ( "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" ) func TestTickerString(t *testing.T) { @@ -41,21 +29,21 @@ func TestTicker(t *testing.T) { expected := event.GenericEvent{Object: new(corev1.ConfigMap)} tq := workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()) - th := handler.Funcs{GenericFunc: func(e event.GenericEvent, q workqueue.RateLimitingInterface) { + th := handler.Funcs{GenericFunc: func(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) { called = append(called, e) assert.Equal(t, q, tq, "should be called with the queue passed in Start") }} - t.Run("WithoutPredicates", func(t *testing.T) { + t.Run("NotImmediate", func(t *testing.T) { called = nil - ticker := NewTicker(100*time.Millisecond, expected) + ticker := NewTicker(100*time.Millisecond, expected, th) ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) t.Cleanup(cancel) // Start the ticker and wait for the deadline to pass. - assert.NilError(t, ticker.Start(ctx, th, tq)) + assert.NilError(t, ticker.Start(ctx, tq)) <-ctx.Done() assert.Equal(t, len(called), 2) @@ -63,39 +51,18 @@ func TestTicker(t *testing.T) { assert.Equal(t, called[1], expected, "expected at 200ms") }) - t.Run("WithPredicates", func(t *testing.T) { - called = nil - - // Predicates that exclude events after a fixed number have passed. - pLength := predicate.Funcs{GenericFunc: func(event.GenericEvent) bool { return len(called) < 3 }} - pTrue := predicate.Funcs{GenericFunc: func(event.GenericEvent) bool { return true }} - - ticker := NewTicker(50*time.Millisecond, expected) - ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) - t.Cleanup(cancel) - - // Start the ticker and wait for the deadline to pass. - assert.NilError(t, ticker.Start(ctx, th, tq, pTrue, pLength)) - <-ctx.Done() - - assert.Equal(t, len(called), 3) - assert.Equal(t, called[0], expected) - assert.Equal(t, called[1], expected) - assert.Equal(t, called[2], expected) - }) - t.Run("Immediate", func(t *testing.T) { called = nil - ticker := NewTickerImmediate(100*time.Millisecond, expected) + ticker := NewTickerImmediate(100*time.Millisecond, expected, th) ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) t.Cleanup(cancel) // Start the ticker and wait for the deadline to pass. - assert.NilError(t, ticker.Start(ctx, th, tq)) + assert.NilError(t, ticker.Start(ctx, tq)) <-ctx.Done() - assert.Equal(t, len(called), 3) + assert.Assert(t, len(called) > 2) assert.Equal(t, called[0], expected, "expected at 0ms") assert.Equal(t, called[1], expected, "expected at 100ms") assert.Equal(t, called[2], expected, "expected at 200ms") diff --git a/internal/controller/standalone_pgadmin/apply.go b/internal/controller/standalone_pgadmin/apply.go index af29aec74b..0eaa613df8 100644 --- a/internal/controller/standalone_pgadmin/apply.go +++ b/internal/controller/standalone_pgadmin/apply.go @@ -1,16 +1,6 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin diff --git a/internal/controller/standalone_pgadmin/config.go b/internal/controller/standalone_pgadmin/config.go index 059439cf40..ddd080985b 100644 --- a/internal/controller/standalone_pgadmin/config.go +++ b/internal/controller/standalone_pgadmin/config.go @@ -1,16 +1,6 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -19,7 +9,11 @@ const ( // ConfigMap keys used also in mounting volume to pod settingsConfigMapKey = "pgadmin-settings.json" settingsClusterMapKey = "pgadmin-shared-clusters.json" + gunicornConfigKey = "gunicorn-config.json" // Port address used to define pod and service pgAdminPort = 5050 + + // Directory for pgAdmin in container + pgAdminDir = "/usr/local/lib/python3.11/site-packages/pgadmin4" ) diff --git a/internal/controller/standalone_pgadmin/configmap.go b/internal/controller/standalone_pgadmin/configmap.go index 30646b7bf5..d1ec39bf13 100644 --- a/internal/controller/standalone_pgadmin/configmap.go +++ b/internal/controller/standalone_pgadmin/configmap.go @@ -1,16 +1,6 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -20,6 +10,7 @@ import ( "encoding/json" "fmt" "sort" + "strconv" corev1 "k8s.io/api/core/v1" @@ -59,13 +50,10 @@ func configmap(pgadmin *v1beta1.PGAdmin, configmap.Annotations = pgadmin.Spec.Metadata.GetAnnotationsOrNil() configmap.Labels = naming.Merge( pgadmin.Spec.Metadata.GetLabelsOrNil(), - map[string]string{ - naming.LabelStandalonePGAdmin: pgadmin.Name, - naming.LabelRole: naming.RolePGAdmin, - }) + naming.StandalonePGAdminLabels(pgadmin.Name)) // TODO(tjmoore4): Populate configuration details. 
- initialize.StringMap(&configmap.Data) + initialize.Map(&configmap.Data) configSettings, err := generateConfig(pgadmin) if err == nil { configmap.Data[settingsConfigMapKey] = configSettings @@ -76,16 +64,29 @@ func configmap(pgadmin *v1beta1.PGAdmin, configmap.Data[settingsClusterMapKey] = clusterSettings } + gunicornSettings, err := generateGunicornConfig(pgadmin) + if err == nil { + configmap.Data[gunicornConfigKey] = gunicornSettings + } + return configmap, err } // generateConfig generates the config settings for the pgAdmin func generateConfig(pgadmin *v1beta1.PGAdmin) (string, error) { + settings := map[string]any{ + // Bind to all IPv4 addresses by default. "0.0.0.0" here represents INADDR_ANY. + // - https://flask.palletsprojects.com/en/2.2.x/api/#flask.Flask.run + // - https://flask.palletsprojects.com/en/2.3.x/api/#flask.Flask.run + "DEFAULT_SERVER": "0.0.0.0", + } - settings := *pgadmin.Spec.Config.Settings.DeepCopy() - if settings == nil { - settings = make(map[string]interface{}) + // Copy any specified settings over the defaults. + for k, v := range pgadmin.Spec.Config.Settings { + settings[k] = v } + + // Write mandatory settings over any specified ones. // SERVER_MODE must always be enabled when running on a webserver. // - https://github.com/pgadmin-org/pgadmin4/blob/REL-7_7/web/config.py#L110 settings["SERVER_MODE"] = true @@ -173,3 +174,36 @@ func generateClusterConfig( err := encoder.Encode(servers) return buffer.String(), err } + +// generateGunicornConfig generates the config settings for the gunicorn server +// - https://docs.gunicorn.org/en/latest/settings.html +func generateGunicornConfig(pgadmin *v1beta1.PGAdmin) (string, error) { + settings := map[string]any{ + // Bind to all IPv4 addresses and set 25 threads by default. + // - https://docs.gunicorn.org/en/latest/settings.html#bind + // - https://docs.gunicorn.org/en/latest/settings.html#threads + "bind": "0.0.0.0:" + strconv.Itoa(pgAdminPort), + "threads": 25, + } + + // Copy any specified settings over the defaults. + for k, v := range pgadmin.Spec.Config.Gunicorn { + settings[k] = v + } + + // Write mandatory settings over any specified ones. + // - https://docs.gunicorn.org/en/latest/settings.html#workers + settings["workers"] = 1 + + // To avoid spurious reconciles, the following value must not change when + // the spec does not change. [json.Encoder] and [json.Marshal] do this by + // emitting map keys in sorted order. Indent so the value is not rendered + // as one long line by `kubectl`. + buffer := new(bytes.Buffer) + encoder := json.NewEncoder(buffer) + encoder.SetEscapeHTML(false) + encoder.SetIndent("", " ") + err := encoder.Encode(settings) + + return buffer.String(), err +} diff --git a/internal/controller/standalone_pgadmin/configmap_test.go b/internal/controller/standalone_pgadmin/configmap_test.go index ff78fc7458..5a844e520c 100644 --- a/internal/controller/standalone_pgadmin/configmap_test.go +++ b/internal/controller/standalone_pgadmin/configmap_test.go @@ -1,16 +1,6 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -27,25 +17,60 @@ import ( func TestGenerateConfig(t *testing.T) { require.ParallelCapacity(t, 0) - expectedString := `{ + t.Run("Default", func(t *testing.T) { + pgadmin := new(v1beta1.PGAdmin) + result, err := generateConfig(pgadmin) + + assert.NilError(t, err) + assert.Equal(t, result, `{ + "DEFAULT_SERVER": "0.0.0.0", + "SERVER_MODE": true, + "UPGRADE_CHECK_ENABLED": false, + "UPGRADE_CHECK_KEY": "", + "UPGRADE_CHECK_URL": "" +}`+"\n") + }) + + t.Run("Mandatory", func(t *testing.T) { + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Spec.Config.Settings = map[string]any{ + "SERVER_MODE": false, + "UPGRADE_CHECK_ENABLED": true, + } + result, err := generateConfig(pgadmin) + + assert.NilError(t, err) + assert.Equal(t, result, `{ + "DEFAULT_SERVER": "0.0.0.0", + "SERVER_MODE": true, + "UPGRADE_CHECK_ENABLED": false, + "UPGRADE_CHECK_KEY": "", + "UPGRADE_CHECK_URL": "" +}`+"\n") + }) + + t.Run("Specified", func(t *testing.T) { + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Spec.Config.Settings = map[string]any{ + "ALLOWED_HOSTS": []any{"225.0.0.0/8", "226.0.0.0/7", "228.0.0.0/6"}, + "DEFAULT_SERVER": "::", + } + result, err := generateConfig(pgadmin) + + assert.NilError(t, err) + assert.Equal(t, result, `{ "ALLOWED_HOSTS": [ "225.0.0.0/8", "226.0.0.0/7", "228.0.0.0/6" ], + "DEFAULT_SERVER": "::", "SERVER_MODE": true, "UPGRADE_CHECK_ENABLED": false, "UPGRADE_CHECK_KEY": "", "UPGRADE_CHECK_URL": "" -} -` - pgadmin := new(v1beta1.PGAdmin) - pgadmin.Spec.Config.Settings = map[string]interface{}{ - "ALLOWED_HOSTS": []interface{}{"225.0.0.0/8", "226.0.0.0/7", "228.0.0.0/6"}, - } - actualString, err := generateConfig(pgadmin) - assert.NilError(t, err) - assert.Equal(t, actualString, expectedString) +}`+"\n") + }) } func TestGenerateClusterConfig(t *testing.T) { @@ -184,3 +209,85 @@ namespace: some-ns }) }) } + +func TestGenerateGunicornConfig(t *testing.T) { + require.ParallelCapacity(t, 0) + + t.Run("Default", func(t *testing.T) { + pgAdmin := &v1beta1.PGAdmin{} + pgAdmin.Name = "test" + pgAdmin.Namespace = "postgres-operator" + + expectedString := `{ + "bind": "0.0.0.0:5050", + "threads": 25, + "workers": 1 +} +` + actualString, err := generateGunicornConfig(pgAdmin) + assert.NilError(t, err) + assert.Equal(t, actualString, expectedString) + }) + + t.Run("Add Settings", func(t *testing.T) { + pgAdmin := &v1beta1.PGAdmin{} + pgAdmin.Name = "test" + pgAdmin.Namespace = "postgres-operator" + pgAdmin.Spec.Config.Gunicorn = map[string]any{ + "keyfile": "/path/to/keyfile", + "certfile": "/path/to/certfile", + } + + expectedString := `{ + "bind": "0.0.0.0:5050", + "certfile": "/path/to/certfile", + "keyfile": "/path/to/keyfile", + "threads": 25, + "workers": 1 +} +` + actualString, err := generateGunicornConfig(pgAdmin) + assert.NilError(t, err) + assert.Equal(t, actualString, expectedString) + }) + + t.Run("Update Defaults", func(t *testing.T) { + pgAdmin := &v1beta1.PGAdmin{} + pgAdmin.Name = "test" + pgAdmin.Namespace = "postgres-operator" + pgAdmin.Spec.Config.Gunicorn = map[string]any{ + "bind": "127.0.0.1:5051", + 
"threads": 30, + } + + expectedString := `{ + "bind": "127.0.0.1:5051", + "threads": 30, + "workers": 1 +} +` + actualString, err := generateGunicornConfig(pgAdmin) + assert.NilError(t, err) + assert.Equal(t, actualString, expectedString) + }) + + t.Run("Update Mandatory", func(t *testing.T) { + pgAdmin := &v1beta1.PGAdmin{} + pgAdmin.Name = "test" + pgAdmin.Namespace = "postgres-operator" + pgAdmin.Spec.Config.Gunicorn = map[string]any{ + "workers": "100", + } + + expectedString := `{ + "bind": "0.0.0.0:5050", + "threads": 25, + "workers": 1 +} +` + actualString, err := generateGunicornConfig(pgAdmin) + assert.NilError(t, err) + assert.Equal(t, actualString, expectedString) + }) + +} diff --git a/internal/controller/standalone_pgadmin/controller.go b/internal/controller/standalone_pgadmin/controller.go index 3c40a4666e..81d5fc2d40 100644 --- a/internal/controller/standalone_pgadmin/controller.go +++ b/internal/controller/standalone_pgadmin/controller.go @@ -1,34 +1,23 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin import ( "context" + "io" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/runtime" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/source" + controllerruntime "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -36,51 +25,52 @@ import ( // PGAdminReconciler reconciles a PGAdmin object type PGAdminReconciler struct { client.Client - Owner client.FieldOwner - Recorder record.EventRecorder - Scheme *runtime.Scheme + Owner client.FieldOwner + PodExec func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error + Recorder record.EventRecorder + IsOpenShift bool } +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list,watch} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={list,watch} +//+kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={list,watch} +//+kubebuilder:rbac:groups="",resources="secrets",verbs={list,watch} +//+kubebuilder:rbac:groups="",resources="configmaps",verbs={list,watch} +//+kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={list,watch} // SetupWithManager sets up the controller with the Manager. 
// // TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object. func (r *PGAdminReconciler) SetupWithManager(mgr ctrl.Manager) error { + if r.PodExec == nil { + var err error + r.PodExec, err = controllerruntime.NewPodExecutor(mgr.GetConfig()) + if err != nil { + return err + } + } + return ctrl.NewControllerManagedBy(mgr). For(&v1beta1.PGAdmin{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.PersistentVolumeClaim{}). + Owns(&corev1.Secret{}). + Owns(&appsv1.StatefulSet{}). + Owns(&corev1.Service{}). Watches( - &source.Kind{Type: v1beta1.NewPostgresCluster()}, + v1beta1.NewPostgresCluster(), r.watchPostgresClusters(), ). + Watches( + &corev1.Secret{}, + r.watchForRelatedSecret(), + ). Complete(r) } -// watchPostgresClusters returns a [handler.EventHandler] for PostgresClusters. -func (r *PGAdminReconciler) watchPostgresClusters() handler.Funcs { - handle := func(cluster client.Object, q workqueue.RateLimitingInterface) { - ctx := context.Background() - for _, pgadmin := range r.findPGAdminsForPostgresCluster(ctx, cluster) { - - q.Add(ctrl.Request{ - NamespacedName: client.ObjectKeyFromObject(pgadmin), - }) - } - } - - return handler.Funcs{ - CreateFunc: func(e event.CreateEvent, q workqueue.RateLimitingInterface) { - handle(e.Object, q) - }, - UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) { - handle(e.ObjectNew, q) - }, - DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) { - handle(e.Object, q) - }, - } -} - //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={get} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins/status",verbs={patch} @@ -113,7 +103,7 @@ func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct } }() - log.Info("Reconciling pgAdmin") + log.V(1).Info("Reconciling pgAdmin") // Set defaults if unset pgAdmin.Default() @@ -122,10 +112,9 @@ func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct configmap *corev1.ConfigMap dataVolume *corev1.PersistentVolumeClaim clusters map[string]*v1beta1.PostgresClusterList + _ *corev1.Service ) - _, err = r.reconcilePGAdminSecret(ctx, pgAdmin) - if err == nil { clusters, err = r.getClustersForPGAdmin(ctx, pgAdmin) } @@ -135,15 +124,21 @@ func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct if err == nil { dataVolume, err = r.reconcilePGAdminDataVolume(ctx, pgAdmin) } + if err == nil { + err = r.reconcilePGAdminService(ctx, pgAdmin) + } if err == nil { err = r.reconcilePGAdminStatefulSet(ctx, pgAdmin, configmap, dataVolume) } + if err == nil { + err = r.reconcilePGAdminUsers(ctx, pgAdmin) + } if err == nil { // at this point everything reconciled successfully, and we can update the // observedGeneration pgAdmin.Status.ObservedGeneration = pgAdmin.GetGeneration() - log.V(1).Info("reconciled cluster") + log.V(1).Info("Reconciled pgAdmin") } return ctrl.Result{}, err @@ -166,3 +161,18 @@ func (r *PGAdminReconciler) setControllerReference( ) error { return controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme()) } + +// deleteControlled safely deletes object when it is controlled by pgAdmin. 
+func (r *PGAdminReconciler) deleteControlled( + ctx context.Context, pgadmin *v1beta1.PGAdmin, object client.Object, +) error { + if metav1.IsControlledBy(object, pgadmin) { + uid := object.GetUID() + version := object.GetResourceVersion() + exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} + + return r.Client.Delete(ctx, object, exactly) + } + + return nil +} diff --git a/internal/controller/standalone_pgadmin/controller_test.go b/internal/controller/standalone_pgadmin/controller_test.go new file mode 100644 index 0000000000..b0fe17cbe6 --- /dev/null +++ b/internal/controller/standalone_pgadmin/controller_test.go @@ -0,0 +1,75 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + "strings" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestDeleteControlled(t *testing.T) { + ctx := context.Background() + cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + ns := setupNamespace(t, cc) + reconciler := PGAdminReconciler{Client: cc} + + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Namespace = ns.Name + pgadmin.Name = strings.ToLower(t.Name()) + assert.NilError(t, cc.Create(ctx, pgadmin)) + + t.Run("NoOwnership", func(t *testing.T) { + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "solo" + + assert.NilError(t, cc.Create(ctx, secret)) + + // No-op when there's no ownership + assert.NilError(t, reconciler.deleteControlled(ctx, pgadmin, secret)) + assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret)) + }) + + // We aren't currently using setOwnerReference in the pgAdmin controller + // If that changes we can uncomment this code + // t.Run("Owned", func(t *testing.T) { + // secret := &corev1.Secret{} + // secret.Namespace = ns.Name + // secret.Name = "owned" + + // assert.NilError(t, reconciler.setOwnerReference(pgadmin, secret)) + // assert.NilError(t, cc.Create(ctx, secret)) + + // // No-op when not controlled by cluster. + // assert.NilError(t, reconciler.deleteControlled(ctx, pgadmin, secret)) + // assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(secret), secret)) + // }) + + t.Run("Controlled", func(t *testing.T) { + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "controlled" + + assert.NilError(t, reconciler.setControllerReference(pgadmin, secret)) + assert.NilError(t, cc.Create(ctx, secret)) + + // Deletes when controlled by cluster. + assert.NilError(t, reconciler.deleteControlled(ctx, pgadmin, secret)) + + err := cc.Get(ctx, client.ObjectKeyFromObject(secret), secret) + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %#v", err) + }) +} diff --git a/internal/controller/standalone_pgadmin/helpers_test.go b/internal/controller/standalone_pgadmin/helpers_test.go index 1027cee894..9096edb5a1 100644 --- a/internal/controller/standalone_pgadmin/helpers_test.go +++ b/internal/controller/standalone_pgadmin/helpers_test.go @@ -1,38 +1,21 @@ -//go:build envtest -// +build envtest - -// Copyright 2023 Crunchy Data Solutions, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin import ( "context" "os" - "path/filepath" "strconv" - "sync" "testing" "time" - "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/yaml" - "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/testing/require" ) // Scale extends d according to PGO_TEST_TIMEOUT_SCALE. @@ -55,93 +38,39 @@ func init() { } } -var kubernetes struct { - sync.Mutex - - env *envtest.Environment - count int -} - // setupKubernetes starts or connects to a Kubernetes API and returns a client -// that uses it. When starting a local API, the client is a member of the -// "system:masters" group. It also creates any CRDs present in the -// "/config/crd/bases" directory. When any of these fail, it calls t.Fatal. -// It deletes CRDs and stops the local API using t.Cleanup. -// -// TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object. +// that uses it. See [require.Kubernetes] for more details. func setupKubernetes(t testing.TB) client.Client { t.Helper() - kubernetes.Lock() - defer kubernetes.Unlock() - - if kubernetes.env == nil { - env := &envtest.Environment{ - CRDDirectoryPaths: []string{ - filepath.Join("..", "..", "..", "config", "crd", "bases"), - }, - } - - _, err := env.Start() - assert.NilError(t, err) - - kubernetes.env = env - } - - kubernetes.count++ + // Start and/or connect to a Kubernetes API, or Skip when that's not configured. + cc := require.Kubernetes(t) + // Log the status of any test namespaces after this test fails. 
t.Cleanup(func() { - kubernetes.Lock() - defer kubernetes.Unlock() - if t.Failed() { - if cc, err := client.New(kubernetes.env.Config, client.Options{}); err == nil { - var namespaces corev1.NamespaceList - _ = cc.List(context.Background(), &namespaces, client.HasLabels{"postgres-operator-test"}) + var namespaces corev1.NamespaceList + _ = cc.List(context.Background(), &namespaces, client.HasLabels{"postgres-operator-test"}) - type shaped map[string]corev1.NamespaceStatus - result := make([]shaped, len(namespaces.Items)) + type shaped map[string]corev1.NamespaceStatus + result := make([]shaped, len(namespaces.Items)) - for i, ns := range namespaces.Items { - result[i] = shaped{ns.Labels["postgres-operator-test"]: ns.Status} - } - - formatted, _ := yaml.Marshal(result) - t.Logf("Test Namespaces:\n%s", formatted) + for i, ns := range namespaces.Items { + result[i] = shaped{ns.Labels["postgres-operator-test"]: ns.Status} } - } - - kubernetes.count-- - if kubernetes.count == 0 { - assert.Check(t, kubernetes.env.Stop()) - kubernetes.env = nil + formatted, _ := yaml.Marshal(result) + t.Logf("Test Namespaces:\n%s", formatted) } }) - scheme, err := runtime.CreatePostgresOperatorScheme() - assert.NilError(t, err) - - client, err := client.New(kubernetes.env.Config, client.Options{Scheme: scheme}) - assert.NilError(t, err) - - return client + return cc } // setupNamespace creates a random namespace that will be deleted by t.Cleanup. -// When creation fails, it calls t.Fatal. The caller may delete the namespace -// at any time. // -// TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object. +// Deprecated: Use [require.Namespace] instead. func setupNamespace(t testing.TB, cc client.Client) *corev1.Namespace { t.Helper() - ns := &corev1.Namespace{} - ns.GenerateName = "postgres-operator-test-" - ns.Labels = map[string]string{"postgres-operator-test": t.Name()} - - ctx := context.Background() - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, client.IgnoreNotFound(cc.Delete(ctx, ns))) }) - - return ns + return require.Namespace(t, cc) } diff --git a/internal/controller/standalone_pgadmin/helpers_unit_test.go b/internal/controller/standalone_pgadmin/helpers_unit_test.go index c071e402d1..63887385fc 100644 --- a/internal/controller/standalone_pgadmin/helpers_unit_test.go +++ b/internal/controller/standalone_pgadmin/helpers_unit_test.go @@ -1,16 +1,6 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -77,7 +67,7 @@ func testVolumeClaimSpec() corev1.PersistentVolumeClaimSpec { // Defines a volume claim spec that can be used to create instances return corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, diff --git a/internal/controller/standalone_pgadmin/pod.go b/internal/controller/standalone_pgadmin/pod.go index 566532bff9..bbb39b9322 100644 --- a/internal/controller/standalone_pgadmin/pod.go +++ b/internal/controller/standalone_pgadmin/pod.go @@ -1,16 +1,6 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -20,6 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/intstr" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/initialize" @@ -28,10 +19,12 @@ import ( ) const ( - configMountPath = "/etc/pgadmin/conf.d" - configFilePath = "~postgres-operator/" + settingsConfigMapKey - clusterFilePath = "~postgres-operator/" + settingsClusterMapKey - ldapFilePath = "~postgres-operator/ldap-bind-password" + configMountPath = "/etc/pgadmin/conf.d" + configFilePath = "~postgres-operator/" + settingsConfigMapKey + clusterFilePath = "~postgres-operator/" + settingsClusterMapKey + configDatabaseURIPath = "~postgres-operator/config-database-uri" + ldapFilePath = "~postgres-operator/ldap-bind-password" + gunicornConfigFilePath = "~postgres-operator/" + gunicornConfigKey // Nothing should be mounted to this location except the script our initContainer writes scriptMountPath = "/etc/pgadmin" @@ -122,18 +115,18 @@ func pod( Name: "PGADMIN_SETUP_EMAIL", Value: fmt.Sprintf("admin@%s.%s.svc", inPGAdmin.Name, inPGAdmin.Namespace), }, + // Setting the KRB5_CONFIG for kerberos + // - https://web.mit.edu/kerberos/krb5-current/doc/admin/conf_files/krb5_conf.html { - Name: "PGADMIN_SETUP_PASSWORD", - ValueFrom: &corev1.EnvVarSource{SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: naming.StandalonePGAdmin(inPGAdmin).Name, - }, - Key: "password", - }}, + Name: "KRB5_CONFIG", + Value: configMountPath + "/krb5.conf", }, + // In testing it was determined that we need to set this env var for the replay cache + // otherwise it defaults to the read-only location `/var/tmp/` + // - https://web.mit.edu/kerberos/krb5-current/doc/basic/rcache_def.html#replay-cache-types { - Name: "PGADMIN_LISTEN_PORT", - Value: fmt.Sprintf("%d", pgAdminPort), + Name: "KRB5RCACHEDIR", + Value: "/tmp", }, }, VolumeMounts: []corev1.VolumeMount{ @@ -161,6 +154,26 @@ func pod( }, }, 
} + + // Creating a readiness probe that will check that the pgAdmin `/login` + // endpoint is reachable at the specified port + readinessProbe := &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt32(pgAdminPort), + Path: "/login", + Scheme: corev1.URISchemeHTTP, + }, + }, + } + gunicornData := inConfigMap.Data[gunicornConfigKey] + // Check the configmap to see if we think TLS is enabled + // If so, update the readiness check scheme to HTTPS + if strings.Contains(gunicornData, "certfile") && strings.Contains(gunicornData, "keyfile") { + readinessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS + } + container.ReadinessProbe = readinessProbe + startup := corev1.Container{ Name: naming.ContainerPGAdminStartup, Command: startupCommand(), @@ -210,11 +223,30 @@ func podConfigFiles(configmap *corev1.ConfigMap, pgadmin v1beta1.PGAdmin) []core Key: settingsClusterMapKey, Path: clusterFilePath, }, + { + Key: gunicornConfigKey, + Path: gunicornConfigFilePath, + }, }, }, }, }...) + if pgadmin.Spec.Config.ConfigDatabaseURI != nil { + config = append(config, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: pgadmin.Spec.Config.ConfigDatabaseURI.LocalObjectReference, + Optional: pgadmin.Spec.Config.ConfigDatabaseURI.Optional, + Items: []corev1.KeyToPath{ + { + Key: pgadmin.Spec.Config.ConfigDatabaseURI.Key, + Path: configDatabaseURIPath, + }, + }, + }, + }) + } + // To enable LDAP authentication for pgAdmin, various LDAP settings must be configured. // While most of the required configuration can be set using the 'settings' // feature on the spec (.Spec.UserInterface.PGAdmin.Config.Settings), those @@ -242,28 +274,64 @@ func podConfigFiles(configmap *corev1.ConfigMap, pgadmin v1beta1.PGAdmin) []core } func startupScript(pgadmin *v1beta1.PGAdmin) []string { - // loadServerCommand is a python command leveraging the pgadmin setup.py script + // loadServerCommandV7 is a python command leveraging the pgadmin v7 setup.py script // with the `--load-servers` flag to replace the servers registered to the admin user // with the contents of the `settingsClusterMapKey` file - var loadServerCommand = fmt.Sprintf(`python3 ${PGADMIN_DIR}/setup.py --load-servers %s/%s --user %s --replace`, + var loadServerCommandV7 = fmt.Sprintf(`python3 ${PGADMIN_DIR}/setup.py --load-servers %s/%s --user %s --replace`, + configMountPath, + clusterFilePath, + fmt.Sprintf("admin@%s.%s.svc", pgadmin.Name, pgadmin.Namespace)) + + // loadServerCommandV8 is a python command leveraging the pgadmin v8 setup.py script + // with the `load-servers` sub-command to replace the servers registered to the admin user + // with the contents of the `settingsClusterMapKey` file + var loadServerCommandV8 = fmt.Sprintf(`python3 ${PGADMIN_DIR}/setup.py load-servers %s/%s --user %s --replace`, configMountPath, clusterFilePath, fmt.Sprintf("admin@%s.%s.svc", pgadmin.Name, pgadmin.Namespace)) - // This script sets up, starts pgadmin, and runs the `loadServerCommand` to register the discovered servers. 
+ // setupCommands (v8 requires the 'setup-db' sub-command) + var setupCommandV7 = "python3 ${PGADMIN_DIR}/setup.py" + var setupCommandV8 = setupCommandV7 + " setup-db" + + // startCommands (v8 image includes Gunicorn) + var startCommandV7 = "pgadmin4 &" + var startCommandV8 = "gunicorn -c /etc/pgadmin/gunicorn_config.py --chdir $PGADMIN_DIR pgAdmin4:app &" + + // This script sets up, starts pgadmin, and runs the appropriate `loadServerCommand` to register the discovered servers. + // pgAdmin is hosted by Gunicorn and uses a config file. + // - https://www.pgadmin.org/docs/pgadmin4/development/server_deployment.html#standalone-gunicorn-configuration + // - https://docs.gunicorn.org/en/latest/configure.html var startScript = fmt.Sprintf(` -PGADMIN_DIR=/usr/local/lib/python3.11/site-packages/pgadmin4 +export PGADMIN_SETUP_PASSWORD="$(date +%%s | sha256sum | base64 | head -c 32)" +PGADMIN_DIR=%s +APP_RELEASE=$(cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_RELEASE)") echo "Running pgAdmin4 Setup" -python3 ${PGADMIN_DIR}/setup.py +if [ $APP_RELEASE -eq 7 ]; then + %s +else + %s +fi echo "Starting pgAdmin4" PGADMIN4_PIDFILE=/tmp/pgadmin4.pid -pgadmin4 & +if [ $APP_RELEASE -eq 7 ]; then + %s +else + %s +fi echo $! > $PGADMIN4_PIDFILE -%s -`, loadServerCommand) +loadServerCommand() { + if [ $APP_RELEASE -eq 7 ]; then + %s + else + %s + fi +} +loadServerCommand +`, pgAdminDir, setupCommandV7, setupCommandV8, startCommandV7, startCommandV8, loadServerCommandV7, loadServerCommandV8) // Use a Bash loop to periodically check: // 1. the mtime of the mounted configuration volume for shared/discovered servers. @@ -276,22 +344,26 @@ echo $! > $PGADMIN4_PIDFILE // descriptor and uses the timeout of the builtin `read` to wait. That same // descriptor gets closed and reopened to use the builtin `[ -nt` to check mtimes. // - https://unix.stackexchange.com/a/407383 - var reloadScript = fmt.Sprintf(` -exec {fd}<> <(:) -while read -r -t 5 -u "${fd}" || true; do - if [ "${cluster_file}" -nt "/proc/self/fd/${fd}" ] && %s - then - exec {fd}>&- && exec {fd}<> <(:) - stat --format='Loaded shared servers dated %%y' "${cluster_file}" - fi - if [ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ] - then - pgadmin4 & - echo $! > $PGADMIN4_PIDFILE - echo "Restarting pgAdmin4" - fi + var reloadScript = ` +exec {fd}<> <(:||:) +while read -r -t 5 -u "${fd}" ||:; do + if [[ "${cluster_file}" -nt "/proc/self/fd/${fd}" ]] && loadServerCommand + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded shared servers dated %y' "${cluster_file}" + fi + if [[ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ]] + then + if [[ $APP_RELEASE -eq 7 ]]; then + ` + startCommandV7 + ` + else + ` + startCommandV8 + ` + fi + echo $! > $PGADMIN4_PIDFILE + echo "Restarting pgAdmin4" + fi done -`, loadServerCommand) +` wrapper := `monitor() {` + startScript + reloadScript + `}; export cluster_file="$1"; export -f monitor; exec -a "$0" bash -ceu monitor` @@ -305,40 +377,86 @@ func startupCommand() []string { // - https://github.com/pgadmin-org/pgadmin4/blob/REL-7_7/docs/en_US/config_py.rst // // This command writes a script in `/etc/pgadmin/config_system.py` that reads from - // the `pgadmin-settings.json` file and the `ldap-bind-password` file (if it exists) - // and sets those variables globally. That way those values are available as pgAdmin - // configurations when pgAdmin starts. + // the `pgadmin-settings.json` file and the config-database-uri and/or + // `ldap-bind-password` files (if either exists) and sets those variables globally. 
+ // That way those values are available as pgAdmin configurations when pgAdmin starts. // - // Note: All pgAdmin settings are uppercase with underscores, so ignore any keys/names - // that are not. + // Note: All pgAdmin settings are uppercase alphanumeric with underscores, so ignore + // any keys/names that are not. // - // Note: set pgAdmin's LDAP_BIND_PASSWORD setting from the Secret last - // in order to overwrite configuration of LDAP_BIND_PASSWORD via ConfigMap JSON. + // Note: set the pgAdmin LDAP_BIND_PASSWORD and CONFIG_DATABASE_URI settings from the + // Secrets last in order to overwrite the respective configurations set via ConfigMap JSON. + const ( // ldapFilePath is the path for mounting the LDAP Bind Password ldapPasswordAbsolutePath = configMountPath + "/" + ldapFilePath + // configDatabaseURIPath is the path for mounting the database URI connection string + configDatabaseURIPathAbsolutePath = configMountPath + "/" + configDatabaseURIPath + configSystem = ` -import json, re, os +import glob, json, re, os +DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} with open('` + configMountPath + `/` + configFilePath + `') as _f: - _conf, _data = re.compile(r'[A-Z_]+'), json.load(_f) + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) if type(_data) is dict: globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) if os.path.isfile('` + ldapPasswordAbsolutePath + `'): with open('` + ldapPasswordAbsolutePath + `') as _f: LDAP_BIND_PASSWORD = _f.read() +if os.path.isfile('` + configDatabaseURIPathAbsolutePath + `'): + with open('` + configDatabaseURIPathAbsolutePath + `') as _f: + CONFIG_DATABASE_URI = _f.read() +` + // gunicorn reads from the `/etc/pgadmin/gunicorn_config.py` file during startup + // after all other config files. + // - https://docs.gunicorn.org/en/latest/configure.html#configuration-file + // + // This command writes a script in `/etc/pgadmin/gunicorn_config.py` that reads + // from the `gunicorn-config.json` file and sets those variables globally. + // That way those values are available as settings when gunicorn starts. + // + // Note: All gunicorn settings are lowercase with underscores, so ignore + // any keys/names that are not. + gunicornConfig = ` +import json, re +with open('` + configMountPath + `/` + gunicornConfigFilePath + `') as _f: + _conf, _data = re.compile(r'[a-z_]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) ` ) - args := []string{strings.TrimLeft(configSystem, "\n")} + args := []string{strings.TrimLeft(configSystem, "\n"), strings.TrimLeft(gunicornConfig, "\n")} script := strings.Join([]string{ // Use the initContainer to create this path to avoid the error noted here: - // - https://github.com/kubernetes/kubernetes/issues/121294 - `mkdir -p /etc/pgadmin/conf.d`, - // Write the system configuration into a read-only file. - `(umask a-w && echo "$1" > ` + scriptMountPath + `/config_system.py` + `)`, + // - https://issue.k8s.io/121294 + `mkdir -p ` + configMountPath, + // Write the system and server configurations. + `echo "$1" > ` + scriptMountPath + `/config_system.py`, + `echo "$2" > ` + scriptMountPath + `/gunicorn_config.py`, }, "\n") return append([]string{"bash", "-ceu", "--", script, "startup"}, args...) } + +// podSecurityContext returns a v1.PodSecurityContext for pgadmin that can write +// to PersistentVolumes. 
+func podSecurityContext(r *PGAdminReconciler) *corev1.PodSecurityContext { + podSecurityContext := initialize.PodSecurityContext() + + // TODO (dsessler7): Add ability to add supplemental groups + + // OpenShift assigns a filesystem group based on a SecurityContextConstraint. + // Otherwise, set a filesystem group so pgAdmin can write to files + // regardless of the UID or GID of a container. + // - https://cloud.redhat.com/blog/a-guide-to-openshift-and-uids + // - https://docs.k8s.io/tasks/configure-pod-container/security-context/ + // - https://docs.openshift.com/container-platform/4.14/authentication/managing-security-context-constraints.html + if !r.IsOpenShift { + podSecurityContext.FSGroup = initialize.Int64(2) + } + + return podSecurityContext +} diff --git a/internal/controller/standalone_pgadmin/pod_test.go b/internal/controller/standalone_pgadmin/pod_test.go index 51a0b7ea2c..19cee52882 100644 --- a/internal/controller/standalone_pgadmin/pod_test.go +++ b/internal/controller/standalone_pgadmin/pod_test.go @@ -1,16 +1,6 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -49,35 +39,75 @@ containers: - bash - -ceu - -- - - "monitor() {\nPGADMIN_DIR=/usr/local/lib/python3.11/site-packages/pgadmin4\n\necho - \"Running pgAdmin4 Setup\"\npython3 ${PGADMIN_DIR}/setup.py\n\necho \"Starting - pgAdmin4\"\nPGADMIN4_PIDFILE=/tmp/pgadmin4.pid\npgadmin4 &\necho $! > $PGADMIN4_PIDFILE\n\npython3 - ${PGADMIN_DIR}/setup.py --load-servers /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json - --user admin@pgadmin.postgres-operator.svc --replace\n\nexec {fd}<> <(:)\nwhile - read -r -t 5 -u \"${fd}\" || true; do\n\tif [ \"${cluster_file}\" -nt \"/proc/self/fd/${fd}\" - ] && python3 ${PGADMIN_DIR}/setup.py --load-servers /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json - --user admin@pgadmin.postgres-operator.svc --replace\n\tthen\n\t\texec {fd}>&- - && exec {fd}<> <(:)\n\t\tstat --format='Loaded shared servers dated %y' \"${cluster_file}\"\n\tfi\n\tif - [ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ]\n\tthen\n\t\tpgadmin4 &\n\t\techo $! 
> - $PGADMIN4_PIDFILE\n\t\techo \"Restarting pgAdmin4\"\n\tfi\ndone\n}; export cluster_file=\"$1\"; - export -f monitor; exec -a \"$0\" bash -ceu monitor" + - |- + monitor() { + export PGADMIN_SETUP_PASSWORD="$(date +%s | sha256sum | base64 | head -c 32)" + PGADMIN_DIR=/usr/local/lib/python3.11/site-packages/pgadmin4 + APP_RELEASE=$(cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_RELEASE)") + + echo "Running pgAdmin4 Setup" + if [ $APP_RELEASE -eq 7 ]; then + python3 ${PGADMIN_DIR}/setup.py + else + python3 ${PGADMIN_DIR}/setup.py setup-db + fi + + echo "Starting pgAdmin4" + PGADMIN4_PIDFILE=/tmp/pgadmin4.pid + if [ $APP_RELEASE -eq 7 ]; then + pgadmin4 & + else + gunicorn -c /etc/pgadmin/gunicorn_config.py --chdir $PGADMIN_DIR pgAdmin4:app & + fi + echo $! > $PGADMIN4_PIDFILE + + loadServerCommand() { + if [ $APP_RELEASE -eq 7 ]; then + python3 ${PGADMIN_DIR}/setup.py --load-servers /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json --user admin@pgadmin.postgres-operator.svc --replace + else + python3 ${PGADMIN_DIR}/setup.py load-servers /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json --user admin@pgadmin.postgres-operator.svc --replace + fi + } + loadServerCommand + + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${cluster_file}" -nt "/proc/self/fd/${fd}" ]] && loadServerCommand + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded shared servers dated %y' "${cluster_file}" + fi + if [[ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ]] + then + if [[ $APP_RELEASE -eq 7 ]]; then + pgadmin4 & + else + gunicorn -c /etc/pgadmin/gunicorn_config.py --chdir $PGADMIN_DIR pgAdmin4:app & + fi + echo $! > $PGADMIN4_PIDFILE + echo "Restarting pgAdmin4" + fi + done + }; export cluster_file="$1"; export -f monitor; exec -a "$0" bash -ceu monitor - pgadmin - /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json env: - name: PGADMIN_SETUP_EMAIL value: admin@pgadmin.postgres-operator.svc - - name: PGADMIN_SETUP_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: pgadmin- - - name: PGADMIN_LISTEN_PORT - value: "5050" + - name: KRB5_CONFIG + value: /etc/pgadmin/conf.d/krb5.conf + - name: KRB5RCACHEDIR + value: /tmp name: pgadmin ports: - containerPort: 5050 name: pgadmin protocol: TCP + readinessProbe: + httpGet: + path: /login + port: 5050 + scheme: HTTP resources: {} securityContext: allowPrivilegeEscalation: false @@ -87,6 +117,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgadmin/conf.d name: pgadmin-config @@ -107,17 +139,28 @@ initContainers: - -- - |- mkdir -p /etc/pgadmin/conf.d - (umask a-w && echo "$1" > /etc/pgadmin/config_system.py) + echo "$1" > /etc/pgadmin/config_system.py + echo "$2" > /etc/pgadmin/gunicorn_config.py - startup - | - import json, re, os + import glob, json, re, os + DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} with open('/etc/pgadmin/conf.d/~postgres-operator/pgadmin-settings.json') as _f: - _conf, _data = re.compile(r'[A-Z_]+'), json.load(_f) + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) if type(_data) is dict: globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) if os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password'): with open('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password') as _f: LDAP_BIND_PASSWORD = _f.read() + if 
os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/config-database-uri'): + with open('/etc/pgadmin/conf.d/~postgres-operator/config-database-uri') as _f: + CONFIG_DATABASE_URI = _f.read() + - | + import json, re + with open('/etc/pgadmin/conf.d/~postgres-operator/gunicorn-config.json') as _f: + _conf, _data = re.compile(r'[a-z_]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) name: pgadmin-startup resources: {} securityContext: @@ -128,6 +171,8 @@ initContainers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgadmin name: pgadmin-config-system @@ -141,6 +186,8 @@ volumes: path: ~postgres-operator/pgadmin-settings.json - key: pgadmin-shared-clusters.json path: ~postgres-operator/pgadmin-shared-clusters.json + - key: gunicorn-config.json + path: ~postgres-operator/gunicorn-config.json - name: pgadmin-data persistentVolumeClaim: claimName: "" @@ -177,30 +224,65 @@ containers: - bash - -ceu - -- - - "monitor() {\nPGADMIN_DIR=/usr/local/lib/python3.11/site-packages/pgadmin4\n\necho - \"Running pgAdmin4 Setup\"\npython3 ${PGADMIN_DIR}/setup.py\n\necho \"Starting - pgAdmin4\"\nPGADMIN4_PIDFILE=/tmp/pgadmin4.pid\npgadmin4 &\necho $! > $PGADMIN4_PIDFILE\n\npython3 - ${PGADMIN_DIR}/setup.py --load-servers /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json - --user admin@pgadmin.postgres-operator.svc --replace\n\nexec {fd}<> <(:)\nwhile - read -r -t 5 -u \"${fd}\" || true; do\n\tif [ \"${cluster_file}\" -nt \"/proc/self/fd/${fd}\" - ] && python3 ${PGADMIN_DIR}/setup.py --load-servers /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json - --user admin@pgadmin.postgres-operator.svc --replace\n\tthen\n\t\texec {fd}>&- - && exec {fd}<> <(:)\n\t\tstat --format='Loaded shared servers dated %y' \"${cluster_file}\"\n\tfi\n\tif - [ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ]\n\tthen\n\t\tpgadmin4 &\n\t\techo $! > - $PGADMIN4_PIDFILE\n\t\techo \"Restarting pgAdmin4\"\n\tfi\ndone\n}; export cluster_file=\"$1\"; - export -f monitor; exec -a \"$0\" bash -ceu monitor" + - |- + monitor() { + export PGADMIN_SETUP_PASSWORD="$(date +%s | sha256sum | base64 | head -c 32)" + PGADMIN_DIR=/usr/local/lib/python3.11/site-packages/pgadmin4 + APP_RELEASE=$(cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_RELEASE)") + + echo "Running pgAdmin4 Setup" + if [ $APP_RELEASE -eq 7 ]; then + python3 ${PGADMIN_DIR}/setup.py + else + python3 ${PGADMIN_DIR}/setup.py setup-db + fi + + echo "Starting pgAdmin4" + PGADMIN4_PIDFILE=/tmp/pgadmin4.pid + if [ $APP_RELEASE -eq 7 ]; then + pgadmin4 & + else + gunicorn -c /etc/pgadmin/gunicorn_config.py --chdir $PGADMIN_DIR pgAdmin4:app & + fi + echo $! > $PGADMIN4_PIDFILE + + loadServerCommand() { + if [ $APP_RELEASE -eq 7 ]; then + python3 ${PGADMIN_DIR}/setup.py --load-servers /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json --user admin@pgadmin.postgres-operator.svc --replace + else + python3 ${PGADMIN_DIR}/setup.py load-servers /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json --user admin@pgadmin.postgres-operator.svc --replace + fi + } + loadServerCommand + + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${cluster_file}" -nt "/proc/self/fd/${fd}" ]] && loadServerCommand + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded shared servers dated %y' "${cluster_file}" + fi + if [[ ! 
-d /proc/$(cat $PGADMIN4_PIDFILE) ]] + then + if [[ $APP_RELEASE -eq 7 ]]; then + pgadmin4 & + else + gunicorn -c /etc/pgadmin/gunicorn_config.py --chdir $PGADMIN_DIR pgAdmin4:app & + fi + echo $! > $PGADMIN4_PIDFILE + echo "Restarting pgAdmin4" + fi + done + }; export cluster_file="$1"; export -f monitor; exec -a "$0" bash -ceu monitor - pgadmin - /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json env: - name: PGADMIN_SETUP_EMAIL value: admin@pgadmin.postgres-operator.svc - - name: PGADMIN_SETUP_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: pgadmin- - - name: PGADMIN_LISTEN_PORT - value: "5050" + - name: KRB5_CONFIG + value: /etc/pgadmin/conf.d/krb5.conf + - name: KRB5RCACHEDIR + value: /tmp image: new-image imagePullPolicy: Always name: pgadmin @@ -208,6 +290,11 @@ containers: - containerPort: 5050 name: pgadmin protocol: TCP + readinessProbe: + httpGet: + path: /login + port: 5050 + scheme: HTTP resources: requests: cpu: 100m @@ -219,6 +306,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgadmin/conf.d name: pgadmin-config @@ -239,17 +328,28 @@ initContainers: - -- - |- mkdir -p /etc/pgadmin/conf.d - (umask a-w && echo "$1" > /etc/pgadmin/config_system.py) + echo "$1" > /etc/pgadmin/config_system.py + echo "$2" > /etc/pgadmin/gunicorn_config.py - startup - | - import json, re, os + import glob, json, re, os + DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} with open('/etc/pgadmin/conf.d/~postgres-operator/pgadmin-settings.json') as _f: - _conf, _data = re.compile(r'[A-Z_]+'), json.load(_f) + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) if type(_data) is dict: globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) if os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password'): with open('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password') as _f: LDAP_BIND_PASSWORD = _f.read() + if os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/config-database-uri'): + with open('/etc/pgadmin/conf.d/~postgres-operator/config-database-uri') as _f: + CONFIG_DATABASE_URI = _f.read() + - | + import json, re + with open('/etc/pgadmin/conf.d/~postgres-operator/gunicorn-config.json') as _f: + _conf, _data = re.compile(r'[a-z_]+'), json.load(_f) + if type(_data) is dict: + globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) image: new-image imagePullPolicy: Always name: pgadmin-startup @@ -264,6 +364,8 @@ initContainers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgadmin name: pgadmin-config-system @@ -277,6 +379,8 @@ volumes: path: ~postgres-operator/pgadmin-settings.json - key: pgadmin-shared-clusters.json path: ~postgres-operator/pgadmin-shared-clusters.json + - key: gunicorn-config.json + path: ~postgres-operator/gunicorn-config.json - name: pgadmin-data persistentVolumeClaim: claimName: "" @@ -323,6 +427,21 @@ func TestPodConfigFiles(t *testing.T) { path: ~postgres-operator/pgadmin-settings.json - key: pgadmin-shared-clusters.json path: ~postgres-operator/pgadmin-shared-clusters.json + - key: gunicorn-config.json + path: ~postgres-operator/gunicorn-config.json name: some-cm `)) } + +func TestPodSecurityContext(t *testing.T) { + pgAdminReconciler := &PGAdminReconciler{} + + assert.Assert(t, cmp.MarshalMatches(podSecurityContext(pgAdminReconciler), ` +fsGroup: 2 
+fsGroupChangePolicy: OnRootMismatch + `)) + + pgAdminReconciler.IsOpenShift = true + assert.Assert(t, cmp.MarshalMatches(podSecurityContext(pgAdminReconciler), + `fsGroupChangePolicy: OnRootMismatch`)) +} diff --git a/internal/controller/standalone_pgadmin/postgrescluster.go b/internal/controller/standalone_pgadmin/postgrescluster.go index c8e59e5dc4..5327b8ae70 100644 --- a/internal/controller/standalone_pgadmin/postgrescluster.go +++ b/internal/controller/standalone_pgadmin/postgrescluster.go @@ -1,16 +1,6 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -21,6 +11,7 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -44,6 +35,10 @@ func (r *PGAdminReconciler) findPGAdminsForPostgresCluster( }) == nil { for i := range pgadmins.Items { for _, serverGroup := range pgadmins.Items[i].Spec.ServerGroups { + if serverGroup.PostgresClusterName == cluster.GetName() { + matching = append(matching, &pgadmins.Items[i]) + continue + } if selector, err := naming.AsSelector(serverGroup.PostgresClusterSelector); err == nil { if selector.Matches(labels.Set(cluster.GetLabels())) { matching = append(matching, &pgadmins.Items[i]) @@ -67,6 +62,19 @@ func (r *PGAdminReconciler) getClustersForPGAdmin( var selector labels.Selector for _, serverGroup := range pgAdmin.Spec.ServerGroups { + cluster := &v1beta1.PostgresCluster{} + if serverGroup.PostgresClusterName != "" { + err = r.Get(ctx, types.NamespacedName{ + Name: serverGroup.PostgresClusterName, + Namespace: pgAdmin.GetNamespace(), + }, cluster) + if err == nil { + matching[serverGroup.Name] = &v1beta1.PostgresClusterList{ + Items: []v1beta1.PostgresCluster{*cluster}, + } + } + continue + } if selector, err = naming.AsSelector(serverGroup.PostgresClusterSelector); err == nil { var filteredList v1beta1.PostgresClusterList err = r.List(ctx, &filteredList, diff --git a/internal/controller/standalone_pgadmin/secret.go b/internal/controller/standalone_pgadmin/secret.go deleted file mode 100644 index 04b7979ac3..0000000000 --- a/internal/controller/standalone_pgadmin/secret.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package standalone_pgadmin - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/util" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -// +kubebuilder:rbac:groups="",resources="secrets",verbs={get} -// +kubebuilder:rbac:groups="",resources="secrets",verbs={create,delete,patch} - -// reconcilePGAdminSecret reconciles the secret containing authentication -// for the pgAdmin administrator account -func (r *PGAdminReconciler) reconcilePGAdminSecret( - ctx context.Context, - pgadmin *v1beta1.PGAdmin) (*corev1.Secret, error) { - - existing := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) - if client.IgnoreNotFound(err) != nil { - return nil, err - } - - secret, err := secret(pgadmin, existing) - - if err == nil { - err = errors.WithStack(r.setControllerReference(pgadmin, secret)) - } - - if err == nil { - err = errors.WithStack(r.apply(ctx, secret)) - } - - return secret, err -} - -func secret(pgadmin *v1beta1.PGAdmin, existing *corev1.Secret) (*corev1.Secret, error) { - - intent := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) - - intent.Annotations = naming.Merge( - pgadmin.Spec.Metadata.GetAnnotationsOrNil(), - ) - intent.Labels = naming.Merge( - pgadmin.Spec.Metadata.GetLabelsOrNil(), - map[string]string{ - naming.LabelStandalonePGAdmin: pgadmin.Name, - naming.LabelRole: naming.RolePGAdmin, - }) - - intent.Data = make(map[string][]byte) - intent.StringData = make(map[string]string) - - // The username format is hardcoded, - // but append the full username to the secret for visibility - intent.StringData["username"] = fmt.Sprintf("admin@%s.%s.svc", - pgadmin.Name, pgadmin.Namespace) - - // Copy existing password into the intent - if existing.Data != nil { - intent.Data["password"] = existing.Data["password"] - } - - // When password is unset, generate a new one - if len(intent.Data["password"]) == 0 { - password, err := util.GenerateASCIIPassword(util.DefaultGeneratedPasswordLength) - if err != nil { - return nil, err - } - intent.Data["password"] = []byte(password) - } - - return intent, nil -} diff --git a/internal/controller/standalone_pgadmin/service.go b/internal/controller/standalone_pgadmin/service.go new file mode 100644 index 0000000000..2453a6a1fa --- /dev/null +++ b/internal/controller/standalone_pgadmin/service.go @@ -0,0 +1,140 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + "github.com/pkg/errors" + + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// +kubebuilder:rbac:groups="",resources="services",verbs={get} +// +kubebuilder:rbac:groups="",resources="services",verbs={create,delete,patch} + +// reconcilePGAdminService will reconcile a ClusterIP service that points to +// pgAdmin. +func (r *PGAdminReconciler) reconcilePGAdminService( + ctx context.Context, + pgadmin *v1beta1.PGAdmin, +) error { + log := logging.FromContext(ctx) + + // Since spec.Service only accepts a single service name, we shouldn't ever + // have more than one service. However, if the user changes ServiceName, we + // need to delete any existing service(s). At the start of every reconcile + // get all services that match the current pgAdmin labels. + services := corev1.ServiceList{} + if err := r.Client.List(ctx, &services, + client.InNamespace(pgadmin.Namespace), + client.MatchingLabels{ + naming.LabelStandalonePGAdmin: pgadmin.Name, + naming.LabelRole: naming.RolePGAdmin, + }); err != nil { + return err + } + + // Delete any controlled and labeled service that is not defined in the spec. + for i := range services.Items { + if services.Items[i].Name != pgadmin.Spec.ServiceName { + log.V(1).Info( + "Deleting service(s) not defined in spec.ServiceName that are owned by pgAdmin", + "serviceName", services.Items[i].Name) + if err := r.deleteControlled(ctx, pgadmin, &services.Items[i]); err != nil { + return err + } + } + } + + // At this point only a service defined by spec.ServiceName should exist. + // Check if the user has requested a service through ServiceName + if pgadmin.Spec.ServiceName != "" { + // Look for an existing service with name ServiceName in the namespace + existingService := &corev1.Service{} + err := r.Client.Get(ctx, types.NamespacedName{ + Name: pgadmin.Spec.ServiceName, + Namespace: pgadmin.GetNamespace(), + }, existingService) + if client.IgnoreNotFound(err) != nil { + return err + } + + // If we found an existing service in our namespace with ServiceName + if !apierrors.IsNotFound(err) { + + // Check if the existing service has ownerReferences. + // If it doesn't we can go ahead and reconcile the service. + // If it does then we need to check if we are the controller. 
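			// Note that metav1.IsControlledBy matches only an ownerReference with
			// Controller=true and this pgAdmin's UID, so a Service carrying any
			// other ownerReferences is rejected below rather than adopted.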
+ if len(existingService.OwnerReferences) != 0 { + + // If the service is not controlled by this pgAdmin then we shouldn't reconcile + if !metav1.IsControlledBy(existingService, pgadmin) { + err := errors.New("Service is controlled by another object") + log.V(1).Error(err, "PGO does not force ownership on existing services", + "ServiceName", pgadmin.Spec.ServiceName) + r.Recorder.Event(pgadmin, + corev1.EventTypeWarning, "InvalidServiceWarning", + "Failed to reconcile Service ServiceName: "+pgadmin.Spec.ServiceName) + + return err + } + } + } + + // A service has been requested and we are allowed to create or reconcile + service := service(pgadmin) + + // Set the controller reference on the service + if err := errors.WithStack(r.setControllerReference(pgadmin, service)); err != nil { + return err + } + + return errors.WithStack(r.apply(ctx, service)) + } + + // If we get here then ServiceName was not provided through the spec + return nil +} + +// Generate a corev1.Service for pgAdmin +func service(pgadmin *v1beta1.PGAdmin) *corev1.Service { + + service := &corev1.Service{} + service.ObjectMeta = metav1.ObjectMeta{ + Name: pgadmin.Spec.ServiceName, + Namespace: pgadmin.Namespace, + } + service.SetGroupVersionKind( + corev1.SchemeGroupVersion.WithKind("Service")) + + service.Annotations = pgadmin.Spec.Metadata.GetAnnotationsOrNil() + service.Labels = naming.Merge( + pgadmin.Spec.Metadata.GetLabelsOrNil(), + naming.StandalonePGAdminLabels(pgadmin.Name)) + + service.Spec.Type = corev1.ServiceTypeClusterIP + service.Spec.Selector = map[string]string{ + naming.LabelStandalonePGAdmin: pgadmin.Name, + } + service.Spec.Ports = []corev1.ServicePort{{ + Name: "pgadmin-port", + Port: pgAdminPort, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(pgAdminPort), + }} + + return service +} diff --git a/internal/controller/standalone_pgadmin/service_test.go b/internal/controller/standalone_pgadmin/service_test.go new file mode 100644 index 0000000000..24b20c8247 --- /dev/null +++ b/internal/controller/standalone_pgadmin/service_test.go @@ -0,0 +1,61 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "testing" + + "gotest.tools/v3/assert" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestService(t *testing.T) { + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Name = "daisy" + pgadmin.Namespace = "daisy-service-ns" + pgadmin.Spec.ServiceName = "daisy-service" + pgadmin.Spec.Metadata = &v1beta1.Metadata{ + Labels: map[string]string{ + "test-label": "test-label-val", + "postgres-operator.crunchydata.com/pgadmin": "bad-val", + "postgres-operator.crunchydata.com/role": "bad-val", + }, + Annotations: map[string]string{ + "test-annotation": "test-annotation-val", + }, + } + + service := service(pgadmin) + assert.Assert(t, service != nil) + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` +apiVersion: v1 +kind: Service + `)) + + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` +annotations: + test-annotation: test-annotation-val +creationTimestamp: null +labels: + postgres-operator.crunchydata.com/pgadmin: daisy + postgres-operator.crunchydata.com/role: pgadmin + test-label: test-label-val +name: daisy-service +namespace: daisy-service-ns + `)) + + assert.Assert(t, cmp.MarshalMatches(service.Spec, ` +ports: +- name: pgadmin-port + port: 5050 + protocol: TCP + targetPort: 5050 +selector: + postgres-operator.crunchydata.com/pgadmin: daisy +type: ClusterIP + `)) +} diff --git a/internal/controller/standalone_pgadmin/statefulset.go b/internal/controller/standalone_pgadmin/statefulset.go index 116dab036c..e086e333f4 100644 --- a/internal/controller/standalone_pgadmin/statefulset.go +++ b/internal/controller/standalone_pgadmin/statefulset.go @@ -1,16 +1,6 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -19,7 +9,9 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/pkg/errors" @@ -33,7 +25,31 @@ func (r *PGAdminReconciler) reconcilePGAdminStatefulSet( ctx context.Context, pgadmin *v1beta1.PGAdmin, configmap *corev1.ConfigMap, dataVolume *corev1.PersistentVolumeClaim, ) error { - sts := statefulset(pgadmin, configmap, dataVolume) + sts := statefulset(r, pgadmin, configmap, dataVolume) + + // Previous versions of PGO used a StatefulSet Pod Management Policy that could leave the Pod + // in a failed state. When we see that it has the wrong policy, we will delete the StatefulSet + // and then recreate it with the correct policy, as this is not a property that can be patched. + // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by + // the StatefulSet that gets created in the next reconcile. 
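	// The delete below uses UID and ResourceVersion preconditions so that a
	// StatefulSet recreated or modified between the Get and the Delete is never
	// removed by mistake.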
+ existing := &appsv1.StatefulSet{} + if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { + if !apierrors.IsNotFound(err) { + return err + } + } else { + if existing.Spec.PodManagementPolicy != sts.Spec.PodManagementPolicy { + // We want to delete the STS without affecting the Pods, so we set the PropagationPolicy to Orphan. + // The orphaned Pods will be claimed by the StatefulSet that will be created in the next reconcile. + uid := existing.GetUID() + version := existing.GetResourceVersion() + exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} + propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan) + + return errors.WithStack(client.IgnoreNotFound(r.Client.Delete(ctx, existing, exactly, propagate))) + } + } + if err := errors.WithStack(r.setControllerReference(pgadmin, sts)); err != nil { return err } @@ -42,6 +58,7 @@ func (r *PGAdminReconciler) reconcilePGAdminStatefulSet( // statefulset defines the StatefulSet needed to run pgAdmin. func statefulset( + r *PGAdminReconciler, pgadmin *v1beta1.PGAdmin, configmap *corev1.ConfigMap, dataVolume *corev1.PersistentVolumeClaim, @@ -52,40 +69,32 @@ func statefulset( sts.Annotations = pgadmin.Spec.Metadata.GetAnnotationsOrNil() sts.Labels = naming.Merge( pgadmin.Spec.Metadata.GetLabelsOrNil(), - naming.StandalonePGAdminCommonLabels(pgadmin), + naming.StandalonePGAdminDataLabels(pgadmin.Name), ) sts.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: map[string]string{ - naming.LabelStandalonePGAdmin: pgadmin.Name, - naming.LabelRole: naming.RolePGAdmin, - }, + MatchLabels: naming.StandalonePGAdminLabels(pgadmin.Name), } sts.Spec.Template.Annotations = pgadmin.Spec.Metadata.GetAnnotationsOrNil() sts.Spec.Template.Labels = naming.Merge( pgadmin.Spec.Metadata.GetLabelsOrNil(), - naming.StandalonePGAdminCommonLabels(pgadmin), + naming.StandalonePGAdminDataLabels(pgadmin.Name), ) // Don't clutter the namespace with extra ControllerRevisions. sts.Spec.RevisionHistoryLimit = initialize.Int32(0) - // Set the StatefulSet update strategy to "RollingUpdate", and the Partition size for the - // update strategy to 0 (note that these are the defaults for a StatefulSet). This means - // every pod of the StatefulSet will be deleted and recreated when the Pod template changes. - // - https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#rolling-updates - // - https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#forced-rollback + // Use StatefulSet's "RollingUpdate" strategy and "Parallel" policy to roll + // out changes to pods even when not Running or not Ready. + // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#rolling-updates + // - https://docs.k8s.io/concepts/workloads/controllers/statefulset/#forced-rollback + // - https://kep.k8s.io/3541 + sts.Spec.PodManagementPolicy = appsv1.ParallelPodManagement sts.Spec.UpdateStrategy.Type = appsv1.RollingUpdateStatefulSetStrategyType - sts.Spec.UpdateStrategy.RollingUpdate = &appsv1.RollingUpdateStatefulSetStrategy{ - Partition: initialize.Int32(0), - } // Use scheduling constraints from the cluster spec. 
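	// Affinity and Tolerations are copied from the spec as-is, and
	// initialize.FromPointer turns a nil PriorityClassName into the empty
	// string, which Kubernetes treats the same as leaving the field unset.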
sts.Spec.Template.Spec.Affinity = pgadmin.Spec.Affinity sts.Spec.Template.Spec.Tolerations = pgadmin.Spec.Tolerations - - if pgadmin.Spec.PriorityClassName != nil { - sts.Spec.Template.Spec.PriorityClassName = *pgadmin.Spec.PriorityClassName - } + sts.Spec.Template.Spec.PriorityClassName = initialize.FromPointer(pgadmin.Spec.PriorityClassName) // Restart containers any time they stop, die, are killed, etc. // - https://docs.k8s.io/concepts/workloads/pods/pod-lifecycle/#restart-policy @@ -101,6 +110,8 @@ func statefulset( // set the image pull secrets, if any exist sts.Spec.Template.Spec.ImagePullSecrets = pgadmin.Spec.ImagePullSecrets + sts.Spec.Template.Spec.SecurityContext = podSecurityContext(r) + pod(pgadmin, configmap, &sts.Spec.Template.Spec, dataVolume) return sts diff --git a/internal/controller/standalone_pgadmin/statefulset_test.go b/internal/controller/standalone_pgadmin/statefulset_test.go index 709a77a9aa..52c501b357 100644 --- a/internal/controller/standalone_pgadmin/statefulset_test.go +++ b/internal/controller/standalone_pgadmin/statefulset_test.go @@ -1,19 +1,6 @@ -//go:build envtest -// +build envtest - -// Copyright 2023 Crunchy Data Solutions, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -100,7 +87,9 @@ dnsPolicy: ClusterFirst enableServiceLinks: false restartPolicy: Always schedulerName: default-scheduler -securityContext: {} +securityContext: + fsGroup: 2 + fsGroupChangePolicy: OnRootMismatch terminationGracePeriodSeconds: 30 ` @@ -205,7 +194,9 @@ imagePullSecrets: - name: myImagePullSecret restartPolicy: Always schedulerName: default-scheduler -securityContext: {} +securityContext: + fsGroup: 2 + fsGroupChangePolicy: OnRootMismatch terminationGracePeriodSeconds: 30 tolerations: - key: sometoleration diff --git a/internal/controller/standalone_pgadmin/users.go b/internal/controller/standalone_pgadmin/users.go new file mode 100644 index 0000000000..3c9a3ce05b --- /dev/null +++ b/internal/controller/standalone_pgadmin/users.go @@ -0,0 +1,308 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +type Executor func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, +) error + +// pgAdminUserForJson is used for user data that is put in the users.json file in the +// pgAdmin secret. 
IsAdmin and Username come from the user spec, whereas Password is +// generated when the user is created. +type pgAdminUserForJson struct { + // Whether the user has admin privileges or not. + IsAdmin bool `json:"isAdmin"` + + // The user's password + Password string `json:"password"` + + // The username for User in pgAdmin. + // Must be unique in the pgAdmin's users list. + Username string `json:"username"` +} + +// reconcilePGAdminUsers reconciles the users listed in the pgAdmin spec, adding them +// to the pgAdmin secret, and creating/updating them in pgAdmin when appropriate. +func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin *v1beta1.PGAdmin) error { + const container = naming.ContainerPGAdmin + var podExecutor Executor + log := logging.FromContext(ctx) + + // Find the running pgAdmin container. When there is none, return early. + pod := &corev1.Pod{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + pod.Name += "-0" + + err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(pod), pod)) + if err != nil { + return client.IgnoreNotFound(err) + } + + var running bool + var pgAdminImageSha string + for _, status := range pod.Status.ContainerStatuses { + if status.Name == container { + running = status.State.Running != nil + pgAdminImageSha = status.ImageID + } + } + if terminating := pod.DeletionTimestamp != nil; running && !terminating { + ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) + + podExecutor = func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + } + } + if podExecutor == nil { + return nil + } + + // If the pgAdmin version is not in the status or the image SHA has changed, get + // the pgAdmin version and store it in the status. + var pgadminVersion int + if pgadmin.Status.MajorVersion == 0 || pgadmin.Status.ImageSHA != pgAdminImageSha { + pgadminVersion, err = r.reconcilePGAdminMajorVersion(ctx, podExecutor) + if err != nil { + return err + } + pgadmin.Status.MajorVersion = pgadminVersion + pgadmin.Status.ImageSHA = pgAdminImageSha + } else { + pgadminVersion = pgadmin.Status.MajorVersion + } + + // If the pgAdmin version is not v8 or higher, return early as user management is + // only supported for pgAdmin v8 and higher. + if pgadminVersion < 8 { + // If pgAdmin version is less than v8 and user management is being attempted, + // log a message clarifying that it is only supported for pgAdmin v8 and higher. + if len(pgadmin.Spec.Users) > 0 { + log.Info("User management is only supported for pgAdmin v8 and higher.", + "pgadminVersion", pgadminVersion) + } + return err + } + + return r.writePGAdminUsers(ctx, pgadmin, podExecutor) +} + +// reconcilePGAdminMajorVersion execs into the pgAdmin pod and retrieves the pgAdmin major version +func (r *PGAdminReconciler) reconcilePGAdminMajorVersion(ctx context.Context, exec Executor) (int, error) { + script := fmt.Sprintf(` +PGADMIN_DIR=%s +cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_RELEASE)" +`, pgAdminDir) + + var stdin, stdout, stderr bytes.Buffer + + err := exec(ctx, &stdin, &stdout, &stderr, + []string{"bash", "-ceu", "--", script}...) 
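	// APP_RELEASE is printed to stdout as a bare integer such as "7" or "8";
	// stderr is captured but not inspected here.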
+ + if err != nil { + return 0, err + } + + return strconv.Atoi(strings.TrimSpace(stdout.String())) +} + +// writePGAdminUsers takes the users in the pgAdmin spec and writes (adds or updates) their data +// to both pgAdmin and the users.json file that is stored in the pgAdmin secret. If a user is +// removed from the spec, its data is removed from users.json, but it is not deleted from pgAdmin. +func (r *PGAdminReconciler) writePGAdminUsers(ctx context.Context, pgadmin *v1beta1.PGAdmin, + exec Executor) error { + log := logging.FromContext(ctx) + + existingUserSecret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + err := errors.WithStack( + r.Client.Get(ctx, client.ObjectKeyFromObject(existingUserSecret), existingUserSecret)) + if client.IgnoreNotFound(err) != nil { + return err + } + + intentUserSecret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + intentUserSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) + + intentUserSecret.Annotations = naming.Merge( + pgadmin.Spec.Metadata.GetAnnotationsOrNil(), + ) + intentUserSecret.Labels = naming.Merge( + pgadmin.Spec.Metadata.GetLabelsOrNil(), + naming.StandalonePGAdminLabels(pgadmin.Name)) + + // Initialize secret data map, or copy existing data if not nil + intentUserSecret.Data = make(map[string][]byte) + + setupScript := fmt.Sprintf(` +PGADMIN_DIR=%s +cd $PGADMIN_DIR +`, pgAdminDir) + + var existingUsersArr []pgAdminUserForJson + if existingUserSecret.Data["users.json"] != nil { + err := json.Unmarshal(existingUserSecret.Data["users.json"], &existingUsersArr) + if err != nil { + return err + } + } + existingUsersMap := make(map[string]pgAdminUserForJson) + for _, user := range existingUsersArr { + existingUsersMap[user.Username] = user + } + intentUsers := []pgAdminUserForJson{} + for _, user := range pgadmin.Spec.Users { + var stdin, stdout, stderr bytes.Buffer + typeFlag := "--nonadmin" + isAdmin := false + if user.Role == "Administrator" { + typeFlag = "--admin" + isAdmin = true + } + + // Get password from secret + userPasswordSecret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Namespace: pgadmin.Namespace, + Name: user.PasswordRef.LocalObjectReference.Name, + }} + err := errors.WithStack( + r.Client.Get(ctx, client.ObjectKeyFromObject(userPasswordSecret), userPasswordSecret)) + if err != nil { + log.Error(err, "Could not get user password secret") + continue + } + + // Make sure the password isn't nil or empty + password := userPasswordSecret.Data[user.PasswordRef.Key] + if password == nil { + log.Error(nil, `Could not retrieve password from secret. Make sure secret name and key are correct.`) + continue + } + if len(password) == 0 { + log.Error(nil, `Password must not be empty.`) + continue + } + + // Assemble user that will be used in add/update command and in updating + // the users.json file in the secret + intentUser := pgAdminUserForJson{ + Username: user.Username, + Password: string(password), + IsAdmin: isAdmin, + } + // If the user already exists in users.json and isAdmin or password has + // changed, run the update-user command. If the user already exists in + // users.json, but it hasn't changed, do nothing. If the user doesn't + // exist in users.json, run the add-user command. 
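			// The commands assembled below take these shapes, where typeFlag is
			// --admin or --nonadmin:
			//
			//	python3 setup.py update-user --admin --password "<pw>" "<username>"
			//	python3 setup.py add-user --admin -- "<username>" "<pw>"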
+ if existingUser, present := existingUsersMap[user.Username]; present { + // If Password or IsAdmin have changed, attempt update-user command + if intentUser.IsAdmin != existingUser.IsAdmin || intentUser.Password != existingUser.Password { + script := setupScript + fmt.Sprintf(`python3 setup.py update-user %s --password "%s" "%s"`, + typeFlag, intentUser.Password, intentUser.Username) + "\n" + err = exec(ctx, &stdin, &stdout, &stderr, + []string{"bash", "-ceu", "--", script}...) + + // If any errors occurred during update, we want to log a message, + // add the existing user to users.json since the update was + // unsuccessful, and continue reconciling users. + if err != nil { + log.Error(err, "PodExec failed: ") + intentUsers = append(intentUsers, existingUser) + continue + } else if strings.TrimSpace(stderr.String()) != "" { + log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py error for %s: ", + intentUser.Username)) + intentUsers = append(intentUsers, existingUser) + continue + } + // If update user fails due to user not found or password length: + // https://github.com/pgadmin-org/pgadmin4/blob/REL-8_5/web/setup.py#L263 + // https://github.com/pgadmin-org/pgadmin4/blob/REL-8_5/web/setup.py#L246 + if strings.Contains(stdout.String(), "User not found") || + strings.Contains(stdout.String(), "Password must be") { + + log.Info("Failed to update pgAdmin user", "user", intentUser.Username, "error", stdout.String()) + r.Recorder.Event(pgadmin, + corev1.EventTypeWarning, "InvalidUserWarning", + fmt.Sprintf("Failed to update pgAdmin user %s: %s", + intentUser.Username, stdout.String())) + intentUsers = append(intentUsers, existingUser) + continue + } + } + } else { + // New user, so attempt add-user command + script := setupScript + fmt.Sprintf(`python3 setup.py add-user %s -- "%s" "%s"`, + typeFlag, intentUser.Username, intentUser.Password) + "\n" + err = exec(ctx, &stdin, &stdout, &stderr, + []string{"bash", "-ceu", "--", script}...) + + // If any errors occurred when attempting to add user, we want to log a message, + // and continue reconciling users. + if err != nil { + log.Error(err, "PodExec failed: ") + continue + } + if strings.TrimSpace(stderr.String()) != "" { + log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py error for %s: ", + intentUser.Username)) + continue + } + // If add user fails due to invalid username or password length: + // https://github.com/pgadmin-org/pgadmin4/blob/REL-8_5/web/pgadmin/tools/user_management/__init__.py#L457 + // https://github.com/pgadmin-org/pgadmin4/blob/REL-8_5/web/setup.py#L374 + if strings.Contains(stdout.String(), "Invalid email address") || + strings.Contains(stdout.String(), "Password must be") { + + log.Info(fmt.Sprintf("Failed to create pgAdmin user %s: %s", + intentUser.Username, stdout.String())) + r.Recorder.Event(pgadmin, + corev1.EventTypeWarning, "InvalidUserWarning", + fmt.Sprintf("Failed to create pgAdmin user %s: %s", + intentUser.Username, stdout.String())) + continue + } + } + // If we've gotten here, the user was successfully added or updated or nothing was done + // to the user at all, so we want to add it to the slice of users that will be put in the + // users.json file in the secret. + intentUsers = append(intentUsers, intentUser) + } + + // We've at least attempted to reconcile all users in the spec. If errors occurred when attempting + // to add a user, that user will not be in intentUsers. 
If errors occurred when attempting to + // update a user, the user will be in intentUsers as it existed before. We now want to marshal the + // intentUsers to json and write the users.json file to the secret. + usersJSON, err := json.Marshal(intentUsers) + if err != nil { + return err + } + intentUserSecret.Data["users.json"] = usersJSON + + err = errors.WithStack(r.setControllerReference(pgadmin, intentUserSecret)) + if err == nil { + err = errors.WithStack(r.apply(ctx, intentUserSecret)) + } + + return err +} diff --git a/internal/controller/standalone_pgadmin/users_test.go b/internal/controller/standalone_pgadmin/users_test.go new file mode 100644 index 0000000000..409fcea701 --- /dev/null +++ b/internal/controller/standalone_pgadmin/users_test.go @@ -0,0 +1,709 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strings" + "testing" + + "github.com/pkg/errors" + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/events" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestReconcilePGAdminUsers(t *testing.T) { + ctx := context.Background() + + pgadmin := &v1beta1.PGAdmin{} + pgadmin.Namespace = "ns1" + pgadmin.Name = "pgadmin1" + pgadmin.UID = "123" + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + Username: "testuser", + Role: "Administrator", + }, + } + + t.Run("NoPods", func(t *testing.T) { + r := new(PGAdminReconciler) + r.Client = fake.NewClientBuilder().Build() + assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) + }) + + // Pod in the namespace + pod := corev1.Pod{} + pod.Namespace = pgadmin.Namespace + pod.Name = fmt.Sprintf("pgadmin-%s-0", pgadmin.UID) + + t.Run("ContainerNotRunning", func(t *testing.T) { + pod := pod.DeepCopy() + + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = nil + + r := new(PGAdminReconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) + }) + + t.Run("PodTerminating", func(t *testing.T) { + pod := pod.DeepCopy() + + // Must add finalizer when adding deletion timestamp otherwise fake client will panic: + // https://github.com/kubernetes-sigs/controller-runtime/pull/2316 + pod.Finalizers = append(pod.Finalizers, "some-finalizer") + + pod.DeletionTimestamp = new(metav1.Time) + *pod.DeletionTimestamp = metav1.Now() + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + + r := new(PGAdminReconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) + }) + + // We only test v7 because if we did v8 then the writePGAdminUsers would + // be called and that method has its own tests later in this file + t.Run("PodHealthyVersionNotSet", func(t *testing.T) { + pgadmin := pgadmin.DeepCopy() + pod := pod.DeepCopy() + + pod.DeletionTimestamp = nil + 
pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + pod.Status.ContainerStatuses[0].ImageID = "fakeSHA" + + r := new(PGAdminReconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + calls := 0 + r.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + assert.Equal(t, pod, "pgadmin-123-0") + assert.Equal(t, namespace, pgadmin.Namespace) + assert.Equal(t, container, naming.ContainerPGAdmin) + + // Simulate a v7 version of pgAdmin by setting stdout to "7" for + // podexec call in reconcilePGAdminMajorVersion + stdout.Write([]byte("7")) + return nil + } + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) + assert.Equal(t, calls, 1, "PodExec should be called once") + assert.Equal(t, pgadmin.Status.MajorVersion, 7) + assert.Equal(t, pgadmin.Status.ImageSHA, "fakeSHA") + }) + + t.Run("PodHealthyShaChanged", func(t *testing.T) { + pgadmin := pgadmin.DeepCopy() + pgadmin.Status.MajorVersion = 7 + pgadmin.Status.ImageSHA = "fakeSHA" + pod := pod.DeepCopy() + + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + pod.Status.ContainerStatuses[0].ImageID = "newFakeSHA" + + r := new(PGAdminReconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + calls := 0 + r.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + // Simulate a v7 version of pgAdmin by setting stdout to "7" for + // podexec call in reconcilePGAdminMajorVersion + stdout.Write([]byte("7")) + return nil + } + + assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) + assert.Equal(t, calls, 1, "PodExec should be called once") + assert.Equal(t, pgadmin.Status.MajorVersion, 7) + assert.Equal(t, pgadmin.Status.ImageSHA, "newFakeSHA") + }) +} + +func TestReconcilePGAdminMajorVersion(t *testing.T) { + ctx := context.Background() + pod := corev1.Pod{} + pod.Namespace = "test-namespace" + pod.Name = "pgadmin-123-0" + reconciler := &PGAdminReconciler{} + + podExecutor := func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return reconciler.PodExec(ctx, pod.Namespace, pod.Name, "pgadmin", stdin, stdout, stderr, command...) 
+ } + + t.Run("SuccessfulRetrieval", func(t *testing.T) { + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + assert.Equal(t, pod, "pgadmin-123-0") + assert.Equal(t, namespace, "test-namespace") + assert.Equal(t, container, naming.ContainerPGAdmin) + + // Simulate a v7 version of pgAdmin by setting stdout to "7" for + // podexec call in reconcilePGAdminMajorVersion + stdout.Write([]byte("7")) + return nil + } + + version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) + assert.NilError(t, err) + assert.Equal(t, version, 7) + }) + + t.Run("FailedRetrieval", func(t *testing.T) { + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + // Simulate the python call giving bad data (not a version int) + stdout.Write([]byte("asdfjkl;")) + return nil + } + + version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) + assert.Check(t, err != nil) + assert.Equal(t, version, 0) + }) + + t.Run("PodExecError", func(t *testing.T) { + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return errors.New("PodExecError") + } + + version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) + assert.Check(t, err != nil) + assert.Equal(t, version, 0) + }) +} + +func TestWritePGAdminUsers(t *testing.T) { + ctx := context.Background() + cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &PGAdminReconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + Recorder: recorder, + } + + ns := setupNamespace(t, cc) + pgadmin := new(v1beta1.PGAdmin) + pgadmin.Name = "test-standalone-pgadmin" + pgadmin.Namespace = ns.Name + assert.NilError(t, cc.Create(ctx, pgadmin)) + + userPasswordSecret1 := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "user-password-secret1", + Namespace: ns.Name, + }, + Data: map[string][]byte{ + "password": []byte(`asdf`), + }, + } + assert.NilError(t, cc.Create(ctx, userPasswordSecret1)) + + userPasswordSecret2 := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "user-password-secret2", + Namespace: ns.Name, + }, + Data: map[string][]byte{ + "password": []byte(`qwer`), + }, + } + assert.NilError(t, cc.Create(ctx, userPasswordSecret2)) + + t.Cleanup(func() { + assert.Check(t, cc.Delete(ctx, pgadmin)) + assert.Check(t, cc.Delete(ctx, userPasswordSecret1)) + assert.Check(t, cc.Delete(ctx, userPasswordSecret2)) + }) + + pod := corev1.Pod{} + pod.Namespace = pgadmin.Namespace + pod.Name = fmt.Sprintf("pgadmin-%s-0", pgadmin.UID) + + podExecutor := func( + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + return reconciler.PodExec(ctx, pod.Namespace, pod.Name, "pgadmin", stdin, stdout, stderr, command...) 
+ } + + t.Run("CreateOneUser", func(t *testing.T) { + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser1", + Role: "Administrator", + }, + } + + calls := 0 + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + assert.Equal(t, pod, fmt.Sprintf("pgadmin-%s-0", pgadmin.UID)) + assert.Equal(t, namespace, pgadmin.Namespace) + assert.Equal(t, container, naming.ContainerPGAdmin) + assert.Equal(t, strings.Contains(strings.Join(command, " "), + `python3 setup.py add-user --admin -- "testuser1" "asdf"`), true) + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 1, "PodExec should be called once") + + secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, true) + assert.Equal(t, usersArr[0].Password, "asdf") + } + }) + + t.Run("AddAnotherUserEditExistingUser", func(t *testing.T) { + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser1", + Role: "User", + }, + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret2", + }, + Key: "password", + }, + Username: "testuser2", + Role: "Administrator", + }, + } + + calls := 0 + addUserCalls := 0 + updateUserCalls := 0 + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + if strings.Contains(strings.Join(command, " "), "python3 setup.py add-user") { + addUserCalls++ + } + if strings.Contains(strings.Join(command, " "), "python3 setup.py update-user") { + updateUserCalls++ + } + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 2, "PodExec should be called twice") + assert.Equal(t, addUserCalls, 1, "The add-user command should be executed once") + assert.Equal(t, updateUserCalls, 1, "The update-user command should be executed once") + + secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 2) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + assert.Equal(t, usersArr[1].Username, "testuser2") + assert.Equal(t, usersArr[1].IsAdmin, true) + assert.Equal(t, usersArr[1].Password, "qwer") + } + }) + + 
t.Run("AddOneEditOneLeaveOneAlone", func(t *testing.T) { + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser1", + Role: "User", + }, + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser2", + Role: "User", + }, + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret2", + }, + Key: "password", + }, + Username: "testuser3", + Role: "Administrator", + }, + } + calls := 0 + addUserCalls := 0 + updateUserCalls := 0 + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + if strings.Contains(strings.Join(command, " "), "python3 setup.py add-user") { + addUserCalls++ + } + if strings.Contains(strings.Join(command, " "), "python3 setup.py update-user") { + updateUserCalls++ + } + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 2, "PodExec should be called twice") + assert.Equal(t, addUserCalls, 1, "The add-user command should be executed once") + assert.Equal(t, updateUserCalls, 1, "The update-user command should be executed once") + + secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 3) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + assert.Equal(t, usersArr[1].Username, "testuser2") + assert.Equal(t, usersArr[1].IsAdmin, false) + assert.Equal(t, usersArr[1].Password, "asdf") + assert.Equal(t, usersArr[2].Username, "testuser3") + assert.Equal(t, usersArr[2].IsAdmin, true) + assert.Equal(t, usersArr[2].Password, "qwer") + } + }) + + t.Run("DeleteUsers", func(t *testing.T) { + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser1", + Role: "User", + }, + } + calls := 0 + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 0, "PodExec should be called zero times") + + secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + }) 
+ + t.Run("ErrorsWhenUpdating", func(t *testing.T) { + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser1", + Role: "Administrator", + }, + } + + // PodExec error + calls := 0 + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + return errors.New("podexec failure") + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 1, "PodExec should be called once") + + // User in users.json should be unchanged + secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + + // setup.py error in stderr + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + stderr.Write([]byte("issue running setup.py update-user command")) + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 2, "PodExec should be called once more") + + // User in users.json should be unchanged + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + }) + + t.Run("ErrorsWhenAdding", func(t *testing.T) { + pgadmin.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret1", + }, + Key: "password", + }, + Username: "testuser1", + Role: "User", + }, + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-password-secret2", + }, + Key: "password", + }, + Username: "testuser2", + Role: "Administrator", + }, + } + + // PodExec error + calls := 0 + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + return errors.New("podexec failure") + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 1, "PodExec should be called once") + + // User in users.json should be unchanged and attempt to add user should not + // have succeeded + secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr 
[]pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + + // setup.py error in stderr + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + stderr.Write([]byte("issue running setup.py add-user command")) + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 2, "PodExec should be called once more") + + // User in users.json should be unchanged and attempt to add user should not + // have succeeded + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + + // setup.py error in stdout regarding email address + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + stdout.Write([]byte("Invalid email address")) + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 3, "PodExec should be called once more") + + // User in users.json should be unchanged and attempt to add user should not + // have succeeded + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + assert.Equal(t, len(recorder.Events), 1) + + // setup.py error in stdout regarding password + reconciler.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + stdout.Write([]byte("Password must be at least 6 characters long")) + + return nil + } + + assert.NilError(t, reconciler.writePGAdminUsers(ctx, pgadmin, podExecutor)) + assert.Equal(t, calls, 4, "PodExec should be called once more") + + // User in users.json should be unchanged and attempt to add user should not + // have succeeded + assert.NilError(t, errors.WithStack( + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + if assert.Check(t, secret.Data["users.json"] != nil) { + var usersArr []pgAdminUserForJson + assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) + assert.Equal(t, len(usersArr), 1) + assert.Equal(t, usersArr[0].Username, "testuser1") + assert.Equal(t, usersArr[0].IsAdmin, false) + assert.Equal(t, usersArr[0].Password, "asdf") + } + assert.Equal(t, len(recorder.Events), 2) + }) +} diff --git a/internal/controller/standalone_pgadmin/volume.go b/internal/controller/standalone_pgadmin/volume.go index 
c12950f471..7615f6142b 100644 --- a/internal/controller/standalone_pgadmin/volume.go +++ b/internal/controller/standalone_pgadmin/volume.go @@ -1,16 +1,6 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -51,19 +41,15 @@ func (r *PGAdminReconciler) reconcilePGAdminDataVolume( // pvc defines the data volume for pgAdmin. func pvc(pgadmin *v1beta1.PGAdmin) *corev1.PersistentVolumeClaim { - labelMap := map[string]string{ - naming.LabelStandalonePGAdmin: pgadmin.Name, - naming.LabelRole: naming.RolePGAdmin, - naming.LabelData: naming.DataPGAdmin, + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: naming.StandalonePGAdmin(pgadmin), } - - pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim")) pvc.Annotations = pgadmin.Spec.Metadata.GetAnnotationsOrNil() pvc.Labels = naming.Merge( pgadmin.Spec.Metadata.GetLabelsOrNil(), - labelMap, + naming.StandalonePGAdminDataLabels(pgadmin.Name), ) pvc.Spec = pgadmin.Spec.DataVolumeClaimSpec diff --git a/internal/controller/standalone_pgadmin/volume_test.go b/internal/controller/standalone_pgadmin/volume_test.go index 4306977668..645c228277 100644 --- a/internal/controller/standalone_pgadmin/volume_test.go +++ b/internal/controller/standalone_pgadmin/volume_test.go @@ -1,19 +1,6 @@ -//go:build envtest -// +build envtest - -// Copyright 2023 Crunchy Data Solutions, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
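// A minimal sketch of the label precedence pvc() above relies on, assuming
// naming.Merge keeps the semantics of labels.Merge from
// k8s.io/apimachinery/pkg/labels (later sets override earlier ones), so
// user-supplied metadata cannot clobber operator-managed labels. The name
// "pgadmin1" and the user label are illustrative only.
func exampleLabelPrecedence() map[string]string {
	userLabels := map[string]string{"example.com/team": "dba"}
	// Operator labels come last and therefore win on any conflicting key.
	return naming.Merge(userLabels, naming.StandalonePGAdminDataLabels("pgadmin1"))
}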
+// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -59,7 +46,7 @@ func TestReconcilePGAdminDataVolume(t *testing.T) { Spec: v1beta1.PGAdminSpec{ DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceStorage: resource.MustParse("1Gi")}}, StorageClassName: initialize.String("storage-class-for-data"), @@ -91,10 +78,7 @@ volumeMode: Filesystem } func TestHandlePersistentVolumeClaimError(t *testing.T) { - scheme, err := runtime.CreatePostgresOperatorScheme() - assert.NilError(t, err) - - recorder := events.NewRecorder(t, scheme) + recorder := events.NewRecorder(t, runtime.Scheme) reconciler := &PGAdminReconciler{ Recorder: recorder, } diff --git a/internal/controller/standalone_pgadmin/watches.go b/internal/controller/standalone_pgadmin/watches.go new file mode 100644 index 0000000000..49ac1ebd29 --- /dev/null +++ b/internal/controller/standalone_pgadmin/watches.go @@ -0,0 +1,102 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// watchPostgresClusters returns a [handler.EventHandler] for PostgresClusters. +func (r *PGAdminReconciler) watchPostgresClusters() handler.Funcs { + handle := func(ctx context.Context, cluster client.Object, q workqueue.RateLimitingInterface) { + for _, pgadmin := range r.findPGAdminsForPostgresCluster(ctx, cluster) { + + q.Add(ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(pgadmin), + }) + } + } + + return handler.Funcs{ + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.ObjectNew, q) + }, + DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + } +} + +// watchForRelatedSecret handles create/update/delete events for secrets, +// passing the Secret ObjectKey to findPGAdminsForSecret +func (r *PGAdminReconciler) watchForRelatedSecret() handler.EventHandler { + handle := func(ctx context.Context, secret client.Object, q workqueue.RateLimitingInterface) { + key := client.ObjectKeyFromObject(secret) + + for _, pgadmin := range r.findPGAdminsForSecret(ctx, key) { + q.Add(ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(pgadmin), + }) + } + } + + return handler.Funcs{ + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.ObjectNew, q) + }, + // If the secret is deleted, we want to reconcile + // in order to emit an event/status about this problem. + // We will also emit a matching event/status about this problem + // when we reconcile the cluster and can't find the secret. 
+ // That way, users will get two alerts: one when the secret is deleted + // and another when the cluster is being reconciled. + DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) + }, + } +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list} + +// findPGAdminsForSecret returns PGAdmins that have a user or users that have their password +// stored in the Secret +func (r *PGAdminReconciler) findPGAdminsForSecret( + ctx context.Context, secret client.ObjectKey, +) []*v1beta1.PGAdmin { + var matching []*v1beta1.PGAdmin + var pgadmins v1beta1.PGAdminList + + // NOTE: If this becomes slow due to a large number of PGAdmins in a single + // namespace, we can configure the [ctrl.Manager] field indexer and pass a + // [fields.Selector] here. + // - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html + if err := r.List(ctx, &pgadmins, &client.ListOptions{ + Namespace: secret.Namespace, + }); err == nil { + for i := range pgadmins.Items { + for j := range pgadmins.Items[i].Spec.Users { + if pgadmins.Items[i].Spec.Users[j].PasswordRef.LocalObjectReference.Name == secret.Name { + matching = append(matching, &pgadmins.Items[i]) + break + } + } + } + } + return matching +} diff --git a/internal/controller/standalone_pgadmin/watches_test.go b/internal/controller/standalone_pgadmin/watches_test.go new file mode 100644 index 0000000000..1419eb9efa --- /dev/null +++ b/internal/controller/standalone_pgadmin/watches_test.go @@ -0,0 +1,122 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package standalone_pgadmin + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestFindPGAdminsForSecret(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient) + reconciler := &PGAdminReconciler{Client: tClient} + + secret1 := &corev1.Secret{} + secret1.Namespace = ns.Name + secret1.Name = "first-password-secret" + + assert.NilError(t, tClient.Create(ctx, secret1)) + secretObjectKey := client.ObjectKeyFromObject(secret1) + + t.Run("NoPGAdmins", func(t *testing.T) { + pgadmins := reconciler.findPGAdminsForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(pgadmins), 0) + }) + + t.Run("OnePGAdmin", func(t *testing.T) { + pgadmin1 := new(v1beta1.PGAdmin) + pgadmin1.Namespace = ns.Name + pgadmin1.Name = "first-pgadmin" + pgadmin1.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "first-password-secret", + }, + Key: "password", + }, + Username: "testuser", + Role: "Administrator", + }, + } + assert.NilError(t, tClient.Create(ctx, pgadmin1)) + + pgadmins := reconciler.findPGAdminsForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(pgadmins), 1) + assert.Equal(t, pgadmins[0].Name, "first-pgadmin") + }) + + t.Run("TwoPGAdmins", func(t *testing.T) { + pgadmin2 := new(v1beta1.PGAdmin) + pgadmin2.Namespace = ns.Name + pgadmin2.Name = "second-pgadmin" + pgadmin2.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: 
corev1.LocalObjectReference{ + Name: "first-password-secret", + }, + Key: "password", + }, + Username: "testuser2", + Role: "Administrator", + }, + } + assert.NilError(t, tClient.Create(ctx, pgadmin2)) + + pgadmins := reconciler.findPGAdminsForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(pgadmins), 2) + pgadminCount := map[string]int{} + for _, pgadmin := range pgadmins { + pgadminCount[pgadmin.Name] += 1 + } + assert.Equal(t, pgadminCount["first-pgadmin"], 1) + assert.Equal(t, pgadminCount["second-pgadmin"], 1) + }) + + t.Run("PGAdminWithDifferentSecretNameNotIncluded", func(t *testing.T) { + pgadmin3 := new(v1beta1.PGAdmin) + pgadmin3.Namespace = ns.Name + pgadmin3.Name = "third-pgadmin" + pgadmin3.Spec.Users = []v1beta1.PGAdminUser{ + { + PasswordRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "other-password-secret", + }, + Key: "password", + }, + Username: "testuser2", + Role: "Administrator", + }, + } + assert.NilError(t, tClient.Create(ctx, pgadmin3)) + + pgadmins := reconciler.findPGAdminsForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(pgadmins), 2) + pgadminCount := map[string]int{} + for _, pgadmin := range pgadmins { + pgadminCount[pgadmin.Name] += 1 + } + assert.Equal(t, pgadminCount["first-pgadmin"], 1) + assert.Equal(t, pgadminCount["second-pgadmin"], 1) + assert.Equal(t, pgadminCount["third-pgadmin"], 0) + }) +} diff --git a/internal/feature/features.go b/internal/feature/features.go new file mode 100644 index 0000000000..db424ead42 --- /dev/null +++ b/internal/feature/features.go @@ -0,0 +1,132 @@ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Package feature provides types and functions to enable and disable features +of the Postgres Operator. + +To add a new feature, export its name as a constant string and configure it +in [NewGate]. Choose a name that is clear to end users, as they will use it +to enable or disable the feature. + +# Stages + +Each feature must be configured with a maturity called a stage. We follow the +Kubernetes convention that features in the "Alpha" stage are disabled by default, +while those in the "Beta" stage are enabled by default. + - https://docs.k8s.io/reference/command-line-tools-reference/feature-gates/#feature-stages + +NOTE: Since Kubernetes 1.24, APIs (not features) in the "Beta" stage are disabled by default: + - https://blog.k8s.io/2022/05/03/kubernetes-1-24-release-announcement/#beta-apis-off-by-default + - https://git.k8s.io/enhancements/keps/sig-architecture/3136-beta-apis-off-by-default#goals + +# Using Features + +We initialize and configure one [MutableGate] in main() and add it to the Context +passed to Reconcilers and other Runnables. Those can then interrogate it using [Enabled]: + + if !feature.Enabled(ctx, feature.Excellent) { return } + +Tests should create and configure their own [MutableGate] and inject it using +[NewContext]. For example, the following enables one feature and disables another: + + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.Excellent: true, + feature.Uncommon: false, + })) + ctx := feature.NewContext(context.Background(), gate) +*/ +package feature + +import ( + "context" + + "k8s.io/component-base/featuregate" +) + +type Feature = featuregate.Feature + +// Gate indicates what features exist and which are enabled. 
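// A minimal sketch of how a main() could configure the gate and hand it to
// reconcilers through the context, as described above. The PGO_FEATURE_GATES
// environment variable is an assumption for illustration (requires the
// "context" and "os" imports):
func exampleGateSetup() context.Context {
	gate := NewGate()
	if err := gate.Set(os.Getenv("PGO_FEATURE_GATES")); err != nil {
		panic(err) // e.g. "unrecognized feature gate"
	}
	// Reconcilers later ask Enabled(ctx, TablespaceVolumes) and friends.
	return NewContext(context.Background(), gate)
}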
+type Gate interface { + Enabled(Feature) bool + String() string +} + +// MutableGate contains features that can be enabled or disabled. +type MutableGate interface { + Gate + // Set enables or disables features by parsing a string like "feature1=true,feature2=false". + Set(string) error + // SetFromMap enables or disables features by boolean values. + SetFromMap(map[string]bool) error +} + +const ( + // Support appending custom queries to default PGMonitor queries + AppendCustomQueries = "AppendCustomQueries" + + // Enables automatic creation of user schema + AutoCreateUserSchema = "AutoCreateUserSchema" + + // Support automatically growing volumes + AutoGrowVolumes = "AutoGrowVolumes" + + BridgeIdentifiers = "BridgeIdentifiers" + + // Support custom sidecars for PostgreSQL instance Pods + InstanceSidecars = "InstanceSidecars" + + // Support custom sidecars for pgBouncer Pods + PGBouncerSidecars = "PGBouncerSidecars" + + // Support tablespace volumes + TablespaceVolumes = "TablespaceVolumes" + + // Support VolumeSnapshots + VolumeSnapshots = "VolumeSnapshots" +) + +// NewGate returns a MutableGate with the Features defined in this package. +func NewGate() MutableGate { + gate := featuregate.NewFeatureGate() + + if err := gate.Add(map[Feature]featuregate.FeatureSpec{ + AppendCustomQueries: {Default: false, PreRelease: featuregate.Alpha}, + AutoCreateUserSchema: {Default: true, PreRelease: featuregate.Beta}, + AutoGrowVolumes: {Default: false, PreRelease: featuregate.Alpha}, + BridgeIdentifiers: {Default: false, PreRelease: featuregate.Alpha}, + InstanceSidecars: {Default: false, PreRelease: featuregate.Alpha}, + PGBouncerSidecars: {Default: false, PreRelease: featuregate.Alpha}, + TablespaceVolumes: {Default: false, PreRelease: featuregate.Alpha}, + VolumeSnapshots: {Default: false, PreRelease: featuregate.Alpha}, + }); err != nil { + panic(err) + } + + return gate +} + +type contextKey struct{} + +// Enabled indicates if a Feature is enabled in the Gate contained in ctx. It +// returns false when there is no Gate. +func Enabled(ctx context.Context, f Feature) bool { + gate, ok := ctx.Value(contextKey{}).(Gate) + return ok && gate.Enabled(f) +} + +// NewContext returns a copy of ctx containing gate. Check it using [Enabled]. +func NewContext(ctx context.Context, gate Gate) context.Context { + return context.WithValue(ctx, contextKey{}, gate) +} + +func ShowGates(ctx context.Context) string { + featuresEnabled := "" + gate, ok := ctx.Value(contextKey{}).(Gate) + if ok { + featuresEnabled = gate.String() + } + return featuresEnabled +} diff --git a/internal/feature/features_test.go b/internal/feature/features_test.go new file mode 100644 index 0000000000..f76dd216e6 --- /dev/null +++ b/internal/feature/features_test.go @@ -0,0 +1,65 @@ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package feature + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" +) + +func TestDefaults(t *testing.T) { + t.Parallel() + gate := NewGate() + + assert.Assert(t, false == gate.Enabled(AppendCustomQueries)) + assert.Assert(t, true == gate.Enabled(AutoCreateUserSchema)) + assert.Assert(t, false == gate.Enabled(AutoGrowVolumes)) + assert.Assert(t, false == gate.Enabled(BridgeIdentifiers)) + assert.Assert(t, false == gate.Enabled(InstanceSidecars)) + assert.Assert(t, false == gate.Enabled(PGBouncerSidecars)) + assert.Assert(t, false == gate.Enabled(TablespaceVolumes)) + assert.Assert(t, false == gate.Enabled(VolumeSnapshots)) + + assert.Equal(t, gate.String(), "") +} + +func TestStringFormat(t *testing.T) { + t.Parallel() + gate := NewGate() + + assert.NilError(t, gate.Set("")) + assert.NilError(t, gate.Set("TablespaceVolumes=true")) + assert.Equal(t, gate.String(), "TablespaceVolumes=true") + assert.Assert(t, true == gate.Enabled(TablespaceVolumes)) + + err := gate.Set("NotAGate=true") + assert.ErrorContains(t, err, "unrecognized feature gate") + assert.ErrorContains(t, err, "NotAGate") + + err = gate.Set("GateNotSet") + assert.ErrorContains(t, err, "missing bool") + assert.ErrorContains(t, err, "GateNotSet") + + err = gate.Set("GateNotSet=foo") + assert.ErrorContains(t, err, "invalid value") + assert.ErrorContains(t, err, "GateNotSet") +} + +func TestContext(t *testing.T) { + t.Parallel() + gate := NewGate() + ctx := NewContext(context.Background(), gate) + assert.Equal(t, ShowGates(ctx), "") + + assert.NilError(t, gate.Set("TablespaceVolumes=true")) + assert.Assert(t, true == Enabled(ctx, TablespaceVolumes)) + assert.Equal(t, ShowGates(ctx), "TablespaceVolumes=true") + + assert.NilError(t, gate.SetFromMap(map[string]bool{TablespaceVolumes: false})) + assert.Assert(t, false == Enabled(ctx, TablespaceVolumes)) + assert.Equal(t, ShowGates(ctx), "TablespaceVolumes=false") +} diff --git a/internal/initialize/doc.go b/internal/initialize/doc.go index 42d347f617..aedd85846f 100644 --- a/internal/initialize/doc.go +++ b/internal/initialize/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package initialize provides functions to initialize some common fields and types. package initialize diff --git a/internal/initialize/intstr.go b/internal/initialize/intstr.go deleted file mode 100644 index 9cfb1e3cbe..0000000000 --- a/internal/initialize/intstr.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package initialize - -import ( - "k8s.io/apimachinery/pkg/util/intstr" -) - -// IntOrStringInt32 returns an *intstr.IntOrString containing i. -func IntOrStringInt32(i int32) *intstr.IntOrString { - return IntOrString(intstr.FromInt(int(i))) -} - -// IntOrStringString returns an *intstr.IntOrString containing s. -func IntOrStringString(s string) *intstr.IntOrString { - return IntOrString(intstr.FromString(s)) -} - -// IntOrString returns a pointer to the provided IntOrString -func IntOrString(ios intstr.IntOrString) *intstr.IntOrString { - return &ios -} diff --git a/internal/initialize/intstr_test.go b/internal/initialize/intstr_test.go deleted file mode 100644 index 4038bf8787..0000000000 --- a/internal/initialize/intstr_test.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package initialize_test - -import ( - "testing" - - "gotest.tools/v3/assert" - "k8s.io/apimachinery/pkg/util/intstr" - - "github.com/crunchydata/postgres-operator/internal/initialize" -) - -func TestIntOrStringInt32(t *testing.T) { - // Same content as the upstream constructor. - upstream := intstr.FromInt(42) - n := initialize.IntOrStringInt32(42) - - assert.DeepEqual(t, &upstream, n) -} - -func TestIntOrStringString(t *testing.T) { - upstream := intstr.FromString("50%") - s := initialize.IntOrStringString("50%") - - assert.DeepEqual(t, &upstream, s) -} -func TestIntOrString(t *testing.T) { - upstream := intstr.FromInt(0) - - ios := initialize.IntOrString(intstr.FromInt(0)) - assert.DeepEqual(t, *ios, upstream) -} diff --git a/internal/initialize/metadata.go b/internal/initialize/metadata.go index e0f0460bf8..d62530736a 100644 --- a/internal/initialize/metadata.go +++ b/internal/initialize/metadata.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package initialize diff --git a/internal/initialize/metadata_test.go b/internal/initialize/metadata_test.go index 4cefc6734e..735e455a2e 100644 --- a/internal/initialize/metadata_test.go +++ b/internal/initialize/metadata_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize_test diff --git a/internal/initialize/primitives.go b/internal/initialize/primitives.go index 63b02daf23..9bc264f88c 100644 --- a/internal/initialize/primitives.go +++ b/internal/initialize/primitives.go @@ -1,30 +1,12 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize // Bool returns a pointer to v. func Bool(v bool) *bool { return &v } -// ByteMap initializes m when it points to nil. -func ByteMap(m *map[string][]byte) { - if m != nil && *m == nil { - *m = make(map[string][]byte) - } -} - // FromPointer returns the value that p points to. // When p is nil, it returns the zero value of T. func FromPointer[T any](p *T) T { @@ -41,15 +23,17 @@ func Int32(v int32) *int32 { return &v } // Int64 returns a pointer to v. func Int64(v int64) *int64 { return &v } +// Map initializes m when it points to nil. +func Map[M ~map[K]V, K comparable, V any](m *M) { + // See https://pkg.go.dev/maps for similar type constraints. + + if m != nil && *m == nil { + *m = make(M) + } +} + // Pointer returns a pointer to v. func Pointer[T any](v T) *T { return &v } // String returns a pointer to v. func String(v string) *string { return &v } - -// StringMap initializes m when it points to nil. -func StringMap(m *map[string]string) { - if m != nil && *m == nil { - *m = make(map[string]string) - } -} diff --git a/internal/initialize/primitives_test.go b/internal/initialize/primitives_test.go index 1aea2512fb..e39898b4fe 100644 --- a/internal/initialize/primitives_test.go +++ b/internal/initialize/primitives_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
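// A minimal sketch of how the generic Map above, together with the existing
// generic Pointer, replaces the deleted ByteMap, StringMap, and IntOrString
// helpers (uses k8s.io/apimachinery/pkg/util/intstr):
func exampleGenerics() {
	var annotations map[string]string
	var data map[string][]byte
	initialize.Map(&annotations) // annotations == map[string]string{}
	initialize.Map(&data)        // data == map[string][]byte{}

	// One generic Pointer covers what IntOrStringInt32 and friends did.
	_ = initialize.Pointer(intstr.FromInt(42))
}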
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize_test @@ -35,27 +24,6 @@ func TestBool(t *testing.T) { } } -func TestByteMap(t *testing.T) { - // Ignores nil pointer. - initialize.ByteMap(nil) - - var m map[string][]byte - - // Starts nil. - assert.Assert(t, m == nil) - - // Gets initialized. - initialize.ByteMap(&m) - assert.DeepEqual(t, m, map[string][]byte{}) - - // Now writable. - m["x"] = []byte("y") - - // Doesn't overwrite. - initialize.ByteMap(&m) - assert.DeepEqual(t, m, map[string][]byte{"x": []byte("y")}) -} - func TestFromPointer(t *testing.T) { t.Run("bool", func(t *testing.T) { assert.Equal(t, initialize.FromPointer((*bool)(nil)), false) @@ -118,6 +86,50 @@ func TestInt64(t *testing.T) { } } +func TestMap(t *testing.T) { + t.Run("map[string][]byte", func(t *testing.T) { + // Ignores nil pointer. + initialize.Map((*map[string][]byte)(nil)) + + var m map[string][]byte + + // Starts nil. + assert.Assert(t, m == nil) + + // Gets initialized. + initialize.Map(&m) + assert.DeepEqual(t, m, map[string][]byte{}) + + // Now writable. + m["x"] = []byte("y") + + // Doesn't overwrite. + initialize.Map(&m) + assert.DeepEqual(t, m, map[string][]byte{"x": []byte("y")}) + }) + + t.Run("map[string]string", func(t *testing.T) { + // Ignores nil pointer. + initialize.Map((*map[string]string)(nil)) + + var m map[string]string + + // Starts nil. + assert.Assert(t, m == nil) + + // Gets initialized. + initialize.Map(&m) + assert.DeepEqual(t, m, map[string]string{}) + + // Now writable. + m["x"] = "y" + + // Doesn't overwrite. + initialize.Map(&m) + assert.DeepEqual(t, m, map[string]string{"x": "y"}) + }) +} + func TestPointer(t *testing.T) { t.Run("bool", func(t *testing.T) { n := initialize.Pointer(false) @@ -189,24 +201,3 @@ func TestString(t *testing.T) { assert.Equal(t, *n, "sup") } } - -func TestStringMap(t *testing.T) { - // Ignores nil pointer. - initialize.StringMap(nil) - - var m map[string]string - - // Starts nil. - assert.Assert(t, m == nil) - - // Gets initialized. - initialize.StringMap(&m) - assert.DeepEqual(t, m, map[string]string{}) - - // Now writable. - m["x"] = "y" - - // Doesn't overwrite. - initialize.StringMap(&m) - assert.DeepEqual(t, m, map[string]string{"x": "y"}) -} diff --git a/internal/initialize/security.go b/internal/initialize/security.go index 29dc0d03c1..5dd52d7b1e 100644 --- a/internal/initialize/security.go +++ b/internal/initialize/security.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize @@ -51,5 +40,9 @@ func RestrictedSecurityContext() *corev1.SecurityContext { // Fail to start the container if its image runs as UID 0 (root). RunAsNonRoot: Bool(true), + + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, } } diff --git a/internal/initialize/security_test.go b/internal/initialize/security_test.go index 821d55cd40..0a6409cf41 100644 --- a/internal/initialize/security_test.go +++ b/internal/initialize/security_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize_test @@ -20,6 +9,7 @@ import ( "testing" "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" "github.com/crunchydata/postgres-operator/internal/initialize" ) @@ -59,9 +49,10 @@ func TestPodSecurityContext(t *testing.T) { assert.Assert(t, psc.RunAsUser == nil, `Containers must not set runAsUser to 0`) - // TODO(cbandy): delegate to v1.SecurityContext - assert.Assert(t, psc.SeccompProfile == nil, - `Seccomp profile must be explicitly set to one of the allowed values. Both the Unconfined profile and the absence of a profile are prohibited.`) + if assert.Check(t, psc.SeccompProfile == nil) { + assert.Assert(t, initialize.RestrictedSecurityContext().SeccompProfile != nil, + `SeccompProfile should be delegated to the container-level v1.SecurityContext`) + } }) } @@ -121,7 +112,7 @@ func TestRestrictedSecurityContext(t *testing.T) { // of OpenShift 4.11 uses the "runtime/default" profile. // - https://docs.openshift.com/container-platform/4.10/security/seccomp-profiles.html // - https://docs.openshift.com/container-platform/4.11/security/seccomp-profiles.html - assert.Assert(t, sc.SeccompProfile == nil, + assert.Assert(t, sc.SeccompProfile.Type == corev1.SeccompProfileTypeRuntimeDefault, `Seccomp profile must be explicitly set to one of the allowed values. Both the Unconfined profile and the absence of a profile are prohibited.`) }) diff --git a/internal/kubeapi/patch.go b/internal/kubeapi/patch.go index 7f9ca1a216..973852c17a 100644 --- a/internal/kubeapi/patch.go +++ b/internal/kubeapi/patch.go @@ -1,19 +1,8 @@ -package kubeapi - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
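// A minimal sketch of what a container picks up from the change above; the
// container name is illustrative:
func exampleRestrictedContainer() corev1.Container {
	return corev1.Container{
		Name: "pgadmin",
		// Now includes a RuntimeDefault seccomp profile alongside the
		// existing restrictions such as RunAsNonRoot.
		SecurityContext: initialize.RestrictedSecurityContext(),
	}
}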
- See the License for the specific language governing permissions and - limitations under the License. -*/ +package kubeapi import ( "strings" diff --git a/internal/kubeapi/patch_test.go b/internal/kubeapi/patch_test.go index 3121ff9e6e..52f5787b8f 100644 --- a/internal/kubeapi/patch_test.go +++ b/internal/kubeapi/patch_test.go @@ -1,19 +1,8 @@ -package kubeapi - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package kubeapi import ( "encoding/json" diff --git a/internal/logging/logr.go b/internal/logging/logr.go index 467fcdab0a..c907997d40 100644 --- a/internal/logging/logr.go +++ b/internal/logging/logr.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 - 2023 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package logging @@ -24,21 +13,24 @@ import ( var global = logr.Discard() -// Discard returns a logr.Logger that discards all messages logged to it. -func Discard() logr.Logger { return logr.Discard() } +// Logger is an interface to an abstract logging implementation. +type Logger = logr.Logger + +// Discard returns a Logger that discards all messages logged to it. +func Discard() Logger { return logr.Discard() } -// SetLogSink replaces the global logr.Logger with sink. Before this is called, -// the global logr.Logger is a no-op. +// SetLogSink replaces the global Logger with sink. Before this is called, +// the global Logger is a no-op. func SetLogSink(sink logr.LogSink) { global = logr.New(sink) } // NewContext returns a copy of ctx containing logger. Retrieve it using FromContext. -func NewContext(ctx context.Context, logger logr.Logger) context.Context { +func NewContext(ctx context.Context, logger Logger) context.Context { return logr.NewContext(ctx, logger) } -// FromContext returns the global logr.Logger or the one stored by a prior call +// FromContext returns the global Logger or the one stored by a prior call // to NewContext. -func FromContext(ctx context.Context) logr.Logger { +func FromContext(ctx context.Context) Logger { log, err := logr.FromContext(ctx) if err != nil { log = global diff --git a/internal/logging/logr_test.go b/internal/logging/logr_test.go index b7a72ce9d0..1cbc818ad9 100644 --- a/internal/logging/logr_test.go +++ b/internal/logging/logr_test.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 - 2023 Crunchy Data Solutions, Inc. 
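// A minimal sketch of the reconciler pattern the Logger alias above
// supports; the key/value pair is illustrative (uses "context" and
// sigs.k8s.io/controller-runtime):
func exampleLogging(ctx context.Context, req ctrl.Request) context.Context {
	log := logging.FromContext(ctx).WithValues("pgadmin", req.NamespacedName)
	log.Info("reconciling")
	// Store the enriched Logger so deeper calls retrieve it via FromContext.
	return logging.NewContext(ctx, log)
}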
-Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package logging diff --git a/internal/logging/logrus.go b/internal/logging/logrus.go index 4a92191869..9683a104d1 100644 --- a/internal/logging/logrus.go +++ b/internal/logging/logrus.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 - 2023 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package logging diff --git a/internal/logging/logrus_test.go b/internal/logging/logrus_test.go index 2f8db184e1..3e73193d1a 100644 --- a/internal/logging/logrus_test.go +++ b/internal/logging/logrus_test.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 - 2023 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package logging diff --git a/internal/naming/annotations.go b/internal/naming/annotations.go index fc33236188..2179a5f084 100644 --- a/internal/naming/annotations.go +++ b/internal/naming/annotations.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -32,21 +21,19 @@ const ( // ID associated with a specific manual backup Job. 
PGBackRestBackup = annotationPrefix + "pgbackrest-backup" + // PGBackRestBackupJobCompletion is the annotation that is added to restore jobs, PVCs, and + // VolumeSnapshots that are involved in the volume snapshot creation process. The annotation + // holds an RFC3339-formatted timestamp that corresponds to the completion time of the associated + // backup job. + PGBackRestBackupJobCompletion = annotationPrefix + "pgbackrest-backup-job-completion" + + // PGBackRestConfigHash is an annotation used to specify the hash value associated with a // repo configuration as needed to detect configuration changes that invalidate running Jobs // (and therefore must be recreated) PGBackRestConfigHash = annotationPrefix + "pgbackrest-hash" - // PGBackRestCurrentConfig is an annotation used to indicate the name of the pgBackRest - // configuration associated with a specific Job as determined by either the current primary - // (if no dedicated repository host is enabled), or the dedicated repository host. This helps - // in detecting pgBackRest backup Jobs that no longer mount the proper pgBackRest - // configuration, e.g. because a failover has occurred, or because dedicated repo host has been - // enabled or disabled. - PGBackRestCurrentConfig = annotationPrefix + "pgbackrest-config" - // PGBackRestRestore is the annotation that is added to a PostgresCluster to initiate an in-place - // restore. The value of the annotation will be a unique identfier for a restore Job (e.g. a + // restore. The value of the annotation will be a unique identifier for a restore Job (e.g. a // timestamp), which will be stored in the PostgresCluster status to properly track completion // of the Job. PGBackRestRestore = annotationPrefix + "pgbackrest-restore" @@ -58,4 +45,27 @@ const ( // for this annotation is due to an issue in pgBackRest (#1841) where using a wildcard address to // bind all addresses does not work in certain IPv6 environments. PGBackRestIPVersion = annotationPrefix + "pgbackrest-ip-version" + + // PostgresExporterCollectorsAnnotation is an annotation used to allow users to control whether or + // not postgres_exporter default metrics, settings, and collectors are enabled. The value "None" + // disables all postgres_exporter defaults. Disabling the defaults may cause errors in dashboards. + PostgresExporterCollectorsAnnotation = annotationPrefix + "postgres-exporter-collectors" + + // CrunchyBridgeClusterAdoptionAnnotation is an annotation used to allow users to "adopt" or take + // control over an existing Bridge Cluster with a CrunchyBridgeCluster CR. Essentially, if a + // CrunchyBridgeCluster CR does not have a status.ID, but the name matches the name of an existing + // bridge cluster, the user must add this annotation to the CR to allow the CR to take control of + // the Bridge Cluster. The value assigned to the annotation must be the ID of the existing cluster. + CrunchyBridgeClusterAdoptionAnnotation = annotationPrefix + "adopt-bridge-cluster" + + // AutoCreateUserSchemaAnnotation is an annotation used to allow users to control whether the cluster + // has schemas automatically created for the users defined in `spec.users` for all of the databases + // listed for that user. + AutoCreateUserSchemaAnnotation = annotationPrefix + "autoCreateUserSchema" + + // AuthorizeBackupRemovalAnnotation is an annotation used to allow users + // to delete PVC-based backups when changing from a cluster with backups + // to a cluster without backups. As usual with the operator, we do not + // touch cloud-based backups.
+ AuthorizeBackupRemovalAnnotation = annotationPrefix + "authorizeBackupRemoval" ) diff --git a/internal/naming/annotations_test.go b/internal/naming/annotations_test.go index 7ece23841f..318dd5ab5c 100644 --- a/internal/naming/annotations_test.go +++ b/internal/naming/annotations_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -23,11 +12,15 @@ import ( ) func TestAnnotationsValid(t *testing.T) { + assert.Assert(t, nil == validation.IsQualifiedName(AuthorizeBackupRemovalAnnotation)) + assert.Assert(t, nil == validation.IsQualifiedName(AutoCreateUserSchemaAnnotation)) + assert.Assert(t, nil == validation.IsQualifiedName(CrunchyBridgeClusterAdoptionAnnotation)) assert.Assert(t, nil == validation.IsQualifiedName(Finalizer)) assert.Assert(t, nil == validation.IsQualifiedName(PatroniSwitchover)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestBackup)) + assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestBackupJobCompletion)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestConfigHash)) - assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestCurrentConfig)) - assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestRestore)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestIPVersion)) + assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestRestore)) + assert.Assert(t, nil == validation.IsQualifiedName(PostgresExporterCollectorsAnnotation)) } diff --git a/internal/naming/controllers.go b/internal/naming/controllers.go index 5495f7f77b..3d492e8a3a 100644 --- a/internal/naming/controllers.go +++ b/internal/naming/controllers.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming diff --git a/internal/naming/dns.go b/internal/naming/dns.go index c41c8fa381..d3351a5d70 100644 --- a/internal/naming/dns.go +++ b/internal/naming/dns.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
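// A minimal sketch of how a controller might consult the adoption annotation
// defined above; the function name is illustrative, not necessarily how the
// CrunchyBridgeCluster controller reads it (uses
// sigs.k8s.io/controller-runtime/pkg/client):
func exampleAdoptionID(obj client.Object) (string, bool) {
	id, ok := obj.GetAnnotations()[naming.CrunchyBridgeClusterAdoptionAnnotation]
	// When present, id must be the ID of the existing Bridge Cluster to adopt.
	return id, ok
}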
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming diff --git a/internal/naming/dns_test.go b/internal/naming/dns_test.go index 505264a057..e7e2ea9dc6 100644 --- a/internal/naming/dns_test.go +++ b/internal/naming/dns_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming diff --git a/internal/naming/doc.go b/internal/naming/doc.go index 625d09bcda..72cab8b0b0 100644 --- a/internal/naming/doc.go +++ b/internal/naming/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package naming provides functions and constants for the postgres-operator // naming and labeling scheme. diff --git a/internal/naming/labels.go b/internal/naming/labels.go index b8596c5d35..f25993122b 100644 --- a/internal/naming/labels.go +++ b/internal/naming/labels.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -119,6 +108,17 @@ const ( // RoleMonitoring is the LabelRole applied to Monitoring resources RoleMonitoring = "monitoring" + + // RoleSnapshot is the LabelRole applied to Snapshot resources. 
+ RoleSnapshot = "snapshot" +) + +const ( + // LabelCrunchyBridgeClusterPostgresRole identifies the PostgreSQL user an object is for or about. + LabelCrunchyBridgeClusterPostgresRole = labelPrefix + "cbc-pgrole" + + // RoleCrunchyBridgeClusterPostgresRole is the LabelRole applied to CBC PostgreSQL role secrets. + RoleCrunchyBridgeClusterPostgresRole = "cbc-pgrole" ) const ( @@ -143,6 +143,9 @@ const ( // BackupReplicaCreate is the backup type for the backup taken to enable pgBackRest replica // creation BackupReplicaCreate BackupJobType = "replica-create" + + // BackupScheduled is the backup type utilized for scheduled backups + BackupScheduled BackupJobType = "scheduled" ) const ( @@ -262,6 +265,7 @@ func PGBackRestCronJobLabels(clusterName, repoName, backupType string) labels.Se cronJobLabels := map[string]string{ LabelPGBackRestRepo: repoName, LabelPGBackRestCronJob: backupType, + LabelPGBackRestBackup: string(BackupScheduled), } return labels.Merge(commonLabels, cronJobLabels) } @@ -290,3 +294,33 @@ func PGBackRestRepoVolumeLabels(clusterName, repoName string) labels.Set { } return labels.Merge(repoLabels, repoVolLabels) } + +// StandalonePGAdminLabels return labels for standalone pgAdmin resources +func StandalonePGAdminLabels(pgAdminName string) labels.Set { + return map[string]string{ + LabelStandalonePGAdmin: pgAdminName, + LabelRole: RolePGAdmin, + } +} + +// StandalonePGAdminSelector provides a selector for standalone pgAdmin resources +func StandalonePGAdminSelector(pgAdminName string) labels.Selector { + return StandalonePGAdminLabels(pgAdminName).AsSelector() +} + +// StandalonePGAdminDataLabels returns the labels for standalone pgAdmin resources +// that contain or mount data +func StandalonePGAdminDataLabels(pgAdminName string) labels.Set { + return labels.Merge( + StandalonePGAdminLabels(pgAdminName), + map[string]string{ + LabelData: DataPGAdmin, + }, + ) +} + +// StandalonePGAdminDataSelector returns a selector for standalone pgAdmin resources +// that contain or mount data +func StandalonePGAdminDataSelector(pgAdmiName string) labels.Selector { + return StandalonePGAdminDataLabels(pgAdmiName).AsSelector() +} diff --git a/internal/naming/labels_test.go b/internal/naming/labels_test.go index 270a5bea61..b8a7779858 100644 --- a/internal/naming/labels_test.go +++ b/internal/naming/labels_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -46,6 +35,7 @@ func TestLabelsValid(t *testing.T) { assert.Assert(t, nil == validation.IsQualifiedName(LabelPostgresUser)) assert.Assert(t, nil == validation.IsQualifiedName(LabelStandalonePGAdmin)) assert.Assert(t, nil == validation.IsQualifiedName(LabelStartupInstance)) + assert.Assert(t, nil == validation.IsQualifiedName(LabelCrunchyBridgeClusterPostgresRole)) } func TestLabelValuesValid(t *testing.T) { @@ -61,8 +51,11 @@ func TestLabelValuesValid(t *testing.T) { assert.Assert(t, nil == validation.IsValidLabelValue(RolePostgresWAL)) assert.Assert(t, nil == validation.IsValidLabelValue(RolePrimary)) assert.Assert(t, nil == validation.IsValidLabelValue(RoleReplica)) + assert.Assert(t, nil == validation.IsValidLabelValue(string(BackupManual))) assert.Assert(t, nil == validation.IsValidLabelValue(string(BackupReplicaCreate))) + assert.Assert(t, nil == validation.IsValidLabelValue(string(BackupScheduled))) assert.Assert(t, nil == validation.IsValidLabelValue(RoleMonitoring)) + assert.Assert(t, nil == validation.IsValidLabelValue(RoleCrunchyBridgeClusterPostgresRole)) } func TestMerge(t *testing.T) { @@ -191,6 +184,7 @@ func TestPGBackRestLabelFuncs(t *testing.T) { assert.Equal(t, pgBackRestCronJobLabels.Get(LabelCluster), clusterName) assert.Check(t, pgBackRestCronJobLabels.Has(LabelPGBackRest)) assert.Equal(t, pgBackRestCronJobLabels.Get(LabelPGBackRestRepo), repoName) + assert.Equal(t, pgBackRestCronJobLabels.Get(LabelPGBackRestBackup), string(BackupScheduled)) // verify the labels that identify pgBackRest dedicated repository host resources pgBackRestDedicatedLabels := PGBackRestDedicatedLabels(clusterName) diff --git a/internal/naming/limitations.md b/internal/naming/limitations.md index 2b43865b62..ba607215f7 100644 --- a/internal/naming/limitations.md +++ b/internal/naming/limitations.md @@ -1,16 +1,7 @@ # Definitions diff --git a/internal/naming/names.go b/internal/naming/names.go index c39d91be68..369591de91 100644 --- a/internal/naming/names.go +++ b/internal/naming/names.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -260,6 +249,24 @@ func ClusterReplicaService(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { } } +// ClusterDedicatedSnapshotVolume returns the ObjectMeta for the dedicated Snapshot +// volume for a cluster. +func ClusterDedicatedSnapshotVolume(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.GetNamespace(), + Name: cluster.GetName() + "-snapshot", + } +} + +// ClusterVolumeSnapshot returns the ObjectMeta, including a random name, for a +// new pgdata VolumeSnapshot. 
+func ClusterVolumeSnapshot(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: cluster.Name + "-pgdata-snapshot-" + rand.String(4), + } +} + // GenerateInstance returns a random name for a member of cluster and set. func GenerateInstance( cluster *v1beta1.PostgresCluster, set *v1beta1.PostgresInstanceSetSpec, @@ -568,16 +575,6 @@ func MovePGBackRestRepoDirJob(cluster *v1beta1.PostgresCluster) metav1.ObjectMet } } -// StandalonePGAdminCommonLabels returns the ObjectMeta used for the standalone -// pgAdmin StatefulSet and Pod. -func StandalonePGAdminCommonLabels(pgadmin *v1beta1.PGAdmin) map[string]string { - return map[string]string{ - LabelStandalonePGAdmin: pgadmin.Name, - LabelData: DataPGAdmin, - LabelRole: RolePGAdmin, - } -} - // StandalonePGAdmin returns the ObjectMeta necessary to lookup the ConfigMap, // Service, StatefulSet, or Volume for the cluster's pgAdmin user interface. func StandalonePGAdmin(pgadmin *v1beta1.PGAdmin) metav1.ObjectMeta { diff --git a/internal/naming/names_test.go b/internal/naming/names_test.go index d9d5f9299e..27835c3e5d 100644 --- a/internal/naming/names_test.go +++ b/internal/naming/names_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -76,8 +65,8 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { value metav1.ObjectMeta } - testUniqueAndValid := func(t *testing.T, tests []test) sets.String { - names := sets.NewString() + testUniqueAndValid := func(t *testing.T, tests []test) sets.Set[string] { + names := sets.Set[string]{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { assert.Equal(t, tt.value.Namespace, cluster.Namespace) @@ -170,7 +159,7 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { assert.Assert(t, nil == validation.IsDNS1123Label(value.Name)) prefix := PostgresUserSecret(cluster, "").Name - for _, name := range names.List() { + for _, name := range sets.List(names) { assert.Assert(t, !strings.HasPrefix(name, prefix), "%q may collide", name) } }) @@ -209,6 +198,12 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { {"PGBackRestRepoVolume", PGBackRestRepoVolume(cluster, repoName)}, }) }) + + t.Run("VolumeSnapshots", func(t *testing.T) { + testUniqueAndValid(t, []test{ + {"ClusterVolumeSnapshot", ClusterVolumeSnapshot(cluster)}, + }) + }) } func TestInstanceNamesUniqueAndValid(t *testing.T) { diff --git a/internal/naming/selectors.go b/internal/naming/selectors.go index 41ad021c90..94dbc3a9fa 100644 --- a/internal/naming/selectors.go +++ b/internal/naming/selectors.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
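`ClusterVolumeSnapshot` is the one name helper here that is non-deterministic: a four-character random suffix keeps repeated snapshots of the same cluster from colliding. A small sketch of the same pattern outside the `naming` package (the cluster name is made up):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/rand"
)

func main() {
	// rand.String returns n characters drawn from lowercase letters and
	// digits, so the result stays a valid DNS-1123 label as long as the
	// base name leaves room under the 63-character limit.
	name := "hippo" + "-pgdata-snapshot-" + rand.String(4)
	fmt.Println(name) // e.g. hippo-pgdata-snapshot-x7r2
}
```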
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -46,6 +35,30 @@ func Cluster(cluster string) metav1.LabelSelector { } } +// ClusterRestoreJobs selects all existing restore jobs in a cluster. +func ClusterRestoreJobs(cluster string) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: cluster, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: LabelPGBackRestRestore, Operator: metav1.LabelSelectorOpExists}, + }, + } +} + +// ClusterBackupJobs selects things for all existing backup jobs in cluster. +func ClusterBackupJobs(cluster string) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: cluster, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: LabelPGBackRestBackup, Operator: metav1.LabelSelectorOpExists}, + }, + } +} + // ClusterDataForPostgresAndPGBackRest selects things for PostgreSQL data and // things for pgBackRest data. func ClusterDataForPostgresAndPGBackRest(cluster string) metav1.LabelSelector { @@ -139,9 +152,13 @@ func ClusterPostgresUsers(cluster string) metav1.LabelSelector { } } -// ClusterPrimary selects things for the Primary PostgreSQL instance. -func ClusterPrimary(cluster string) metav1.LabelSelector { - s := ClusterInstances(cluster) - s.MatchLabels[LabelRole] = RolePatroniLeader - return s +// CrunchyBridgeClusterPostgresRoles selects things labeled for CrunchyBridgeCluster +// PostgreSQL roles in cluster. +func CrunchyBridgeClusterPostgresRoles(clusterName string) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: clusterName, + LabelRole: RoleCrunchyBridgeClusterPostgresRole, + }, + } } diff --git a/internal/naming/selectors_test.go b/internal/naming/selectors_test.go index d60386b7cd..1f5f42ad96 100644 --- a/internal/naming/selectors_test.go +++ b/internal/naming/selectors_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
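`ClusterBackupJobs` mixes an equality match with an `Exists` expression, so the rendered selector has both a `key=value` term and a bare key. A sketch of the conversion the package's `AsSelector` helper performs, using the upstream `metav1.LabelSelectorAsSelector`; the printed string matches the test added below:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ls := metav1.LabelSelector{
		MatchLabels: map[string]string{
			"postgres-operator.crunchydata.com/cluster": "hippo",
		},
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "postgres-operator.crunchydata.com/pgbackrest-backup",
			Operator: metav1.LabelSelectorOpExists,
		}},
	}

	// LabelSelectorAsSelector validates keys and values while converting.
	selector, err := metav1.LabelSelectorAsSelector(&ls)
	if err != nil {
		panic(err)
	}
	fmt.Println(selector)
	// postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/pgbackrest-backup
}
```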
+// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -43,6 +32,18 @@ func TestCluster(t *testing.T) { assert.ErrorContains(t, err, "Invalid") } +func TestClusterBackupJobs(t *testing.T) { + s, err := AsSelector(ClusterBackupJobs("something")) + assert.NilError(t, err) + assert.DeepEqual(t, s.String(), strings.Join([]string{ + "postgres-operator.crunchydata.com/cluster=something", + "postgres-operator.crunchydata.com/pgbackrest-backup", + }, ",")) + + _, err = AsSelector(ClusterBackupJobs("--whoa/yikes")) + assert.ErrorContains(t, err, "Invalid") +} + func TestClusterDataForPostgresAndPGBackRest(t *testing.T) { s, err := AsSelector(ClusterDataForPostgresAndPGBackRest("something")) assert.NilError(t, err) @@ -147,12 +148,14 @@ func TestClusterPostgresUsers(t *testing.T) { assert.ErrorContains(t, err, "Invalid") } -func TestClusterPrimary(t *testing.T) { - s, err := AsSelector(ClusterPrimary("something")) +func TestCrunchyBridgeClusterPostgresRoles(t *testing.T) { + s, err := AsSelector(CrunchyBridgeClusterPostgresRoles("something")) assert.NilError(t, err) assert.DeepEqual(t, s.String(), strings.Join([]string{ "postgres-operator.crunchydata.com/cluster=something", - "postgres-operator.crunchydata.com/instance", - "postgres-operator.crunchydata.com/role=master", + "postgres-operator.crunchydata.com/role=cbc-pgrole", }, ",")) + + _, err = AsSelector(CrunchyBridgeClusterPostgresRoles("--nope--")) + assert.ErrorContains(t, err, "Invalid") } diff --git a/internal/naming/telemetry.go b/internal/naming/telemetry.go index d20c96eb49..5825d6299f 100644 --- a/internal/naming/telemetry.go +++ b/internal/naming/telemetry.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming diff --git a/internal/patroni/api.go b/internal/patroni/api.go index 184aadedef..679da5f4af 100644 --- a/internal/patroni/api.go +++ b/internal/patroni/api.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -200,9 +189,9 @@ func (exec Executor) GetTimeline(ctx context.Context) (int64, error) { } var members []struct { - Role string - State string - Timeline int64 `json:"TL"` + Role string `json:"Role"` + State string `json:"State"` + Timeline int64 `json:"TL"` } err = json.Unmarshal(stdout.Bytes(), &members) if err != nil { diff --git a/internal/patroni/api_test.go b/internal/patroni/api_test.go index 8f3c01fc13..1603d2fc75 100644 --- a/internal/patroni/api_test.go +++ b/internal/patroni/api_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni diff --git a/internal/patroni/certificates.go b/internal/patroni/certificates.go index 67041858db..9aa1525769 100644 --- a/internal/patroni/certificates.go +++ b/internal/patroni/certificates.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni diff --git a/internal/patroni/certificates.md b/internal/patroni/certificates.md index a198a72c47..f58786ce20 100644 --- a/internal/patroni/certificates.md +++ b/internal/patroni/certificates.md @@ -1,16 +1,7 @@ Server diff --git a/internal/patroni/certificates_test.go b/internal/patroni/certificates_test.go index 11ae0ceb7d..3073f2247f 100644 --- a/internal/patroni/certificates_test.go +++ b/internal/patroni/certificates_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
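The new struct tags in `GetTimeline` pin the field mapping for the member list that `patronictl list --format json` emits; without them, unmarshaling leans on Go's case-insensitive fallback matching. A self-contained sketch with an illustrative payload:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative shape of `patronictl list --format json` output.
	payload := []byte(`[{"Role":"leader","State":"running","TL":3},
		{"Role":"replica","State":"streaming","TL":3}]`)

	var members []struct {
		Role     string `json:"Role"`
		State    string `json:"State"`
		Timeline int64  `json:"TL"`
	}
	if err := json.Unmarshal(payload, &members); err != nil {
		panic(err)
	}

	// GetTimeline reports the timeline of the running leader.
	for _, m := range members {
		if m.Role == "leader" && m.State == "running" {
			fmt.Println("timeline:", m.Timeline) // timeline: 3
		}
	}
}
```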
+// +// SPDX-License-Identifier: Apache-2.0 package patroni diff --git a/internal/patroni/config.go b/internal/patroni/config.go index a0ff076fe3..b4d7e54f68 100644 --- a/internal/patroni/config.go +++ b/internal/patroni/config.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -23,6 +12,7 @@ import ( corev1 "k8s.io/api/core/v1" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -205,6 +195,15 @@ func DynamicConfiguration( // TODO(cbandy): explain this. requires an archive, perhaps. "use_slots": false, } + + // When TDE is configured, override the pg_rewind binary name to point + // to the wrapper script. + if config.FetchKeyCommand(&cluster.Spec) != "" { + postgresql["bin_name"] = map[string]any{ + "pg_rewind": "/tmp/pg_rewind_tde.sh", + } + } + if section, ok := root["postgresql"].(map[string]any); ok { for k, v := range section { postgresql[k] = v @@ -227,17 +226,24 @@ func DynamicConfiguration( // Override the above with mandatory parameters. if pgParameters.Mandatory != nil { for k, v := range pgParameters.Mandatory.AsMap() { - // Unlike other PostgreSQL parameters that have mandatory values, - // shared_preload_libraries is a comma separated list that can have - // other values appended in addition to the mandatory values. Below, - // any values provided in the CRD are appended after the mandatory - // values. - s, ok := parameters[k].(string) - if k == "shared_preload_libraries" && ok { - parameters[k] = v + "," + s - } else { - parameters[k] = v + + // This parameter is a comma-separated list. Rather than overwrite the + // user-defined value, we want to combine it with the mandatory one. + // Some libraries belong at specific positions in the list, so figure + // that out as well. + if k == "shared_preload_libraries" { + // Load mandatory libraries ahead of user-defined libraries. + if s, ok := parameters[k].(string); ok && len(s) > 0 { + v = v + "," + s + } + // Load "citus" ahead of any other libraries. + // - https://github.com/citusdata/citus/blob/v12.0.0/src/backend/distributed/shared_library_init.c#L417-L419 + if strings.Contains(v, "citus") { + v = "citus," + v + } } + + parameters[k] = v } } postgresql["parameters"] = parameters @@ -265,7 +271,8 @@ func DynamicConfiguration( // Enabling `pg_rewind` allows a former primary to automatically rejoin the // cluster even if it has commits that were not sent to a replica. In other - // words, this favors availability over consistency. + // words, this favors availability over consistency. Without it, the former + // primary needs patronictl reinit to rejoin. 
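Pulled out into a standalone function, the merge rule above is easy to exercise. This sketch mirrors the `shared_preload_libraries` handling in `DynamicConfiguration` (inputs taken from the test case added below): mandatory libraries go ahead of user-supplied ones, and `citus` is hoisted to the front whenever it appears anywhere in the combined list.

```go
package main

import (
	"fmt"
	"strings"
)

// mergePreloadLibraries mirrors the shared_preload_libraries handling above.
func mergePreloadLibraries(mandatory, user string) string {
	v := mandatory
	// Load mandatory libraries ahead of user-defined libraries.
	if len(user) > 0 {
		v = v + "," + user
	}
	// Load "citus" ahead of any other libraries.
	if strings.Contains(v, "citus") {
		v = "citus," + v
	}
	return v
}

func main() {
	fmt.Println(mergePreloadLibraries("mandatory", "given, citus, more"))
	// citus,mandatory,given, citus, more
}
```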
// // Recent versions of `pg_rewind` can run with limited permissions granted // by Patroni to the user defined in "postgresql.authentication.rewind". @@ -586,6 +593,33 @@ func instanceYAML( }, } } else { + + initdb := []string{ + // Enable checksums on data pages to help detect corruption of + // storage that would otherwise be silent. This also enables + // "wal_log_hints" which is a prerequisite for using `pg_rewind`. + // - https://www.postgresql.org/docs/current/app-initdb.html + // - https://www.postgresql.org/docs/current/app-pgrewind.html + // - https://www.postgresql.org/docs/current/runtime-config-wal.html + // + // The benefits of checksums in the Kubernetes storage landscape + // outweigh their negligible overhead, and enabling them later + // is costly. (Every file of the cluster must be rewritten.) + // PostgreSQL v12 introduced the `pg_checksums` utility which + // can cheaply disable them while PostgreSQL is stopped. + // - https://www.postgresql.org/docs/current/app-pgchecksums.html + "data-checksums", + "encoding=UTF8", + + // NOTE(cbandy): The "--waldir" option was introduced in PostgreSQL v10. + "waldir=" + postgres.WALDirectory(cluster, instance), + } + + // Append the encryption key command, if provided. + if ekc := config.FetchKeyCommand(&cluster.Spec); ekc != "" { + initdb = append(initdb, fmt.Sprintf("encryption-key-command=%s", ekc)) + } + // Populate some "bootstrap" fields to initialize the cluster. // When Patroni is already bootstrapped, this section is ignored. // - https://github.com/zalando/patroni/blob/v2.0.2/docs/SETTINGS.rst#bootstrap-configuration @@ -596,26 +630,7 @@ func instanceYAML( // The "initdb" bootstrap method is configured differently from others. // Patroni prepends "--" before it calls `initdb`. // - https://github.com/zalando/patroni/blob/v2.0.2/patroni/postgresql/bootstrap.py#L45 - "initdb": []string{ - // Enable checksums on data pages to help detect corruption of - // storage that would otherwise be silent. This also enables - // "wal_log_hints" which is a prerequisite for using `pg_rewind`. - // - https://www.postgresql.org/docs/current/app-initdb.html - // - https://www.postgresql.org/docs/current/app-pgrewind.html - // - https://www.postgresql.org/docs/current/runtime-config-wal.html - // - // The benefits of checksums in the Kubernetes storage landscape - // outweigh their negligible overhead, and enabling them later - // is costly. (Every file of the cluster must be rewritten.) - // PostgreSQL v12 introduced the `pg_checksums` utility which - // can cheaply disable them while PostgreSQL is stopped. - // - https://www.postgresql.org/docs/current/app-pgchecksums.html - "data-checksums", - "encoding=UTF8", - - // NOTE(cbandy): The "--waldir" option was introduced in PostgreSQL v10. - "waldir=" + postgres.WALDirectory(cluster, instance), - }, + "initdb": initdb, } } } diff --git a/internal/patroni/config.md b/internal/patroni/config.md index 1c7dbd4874..18d28d8a4e 100644 --- a/internal/patroni/config.md +++ b/internal/patroni/config.md @@ -1,16 +1,7 @@ Patroni configuration is complicated. The daemon `patroni` and the client diff --git a/internal/patroni/config_test.go b/internal/patroni/config_test.go index 3da020feed..a45568df8b 100644 --- a/internal/patroni/config_test.go +++ b/internal/patroni/config_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
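Patroni prepends `--` to each entry of the bootstrap `initdb` list before invoking the binary, so the slice assembled above maps one-to-one onto command-line flags. A sketch of that translation, using the TDE key command that appears in the tests below:

```go
package main

import "fmt"

func main() {
	initdb := []string{
		"data-checksums",
		"encoding=UTF8",
		"waldir=/pgdata/pg12_wal",
		"encryption-key-command=echo test",
	}

	// Patroni turns each list entry into a long option for `initdb`.
	args := make([]string, len(initdb))
	for i, opt := range initdb {
		args[i] = "--" + opt
	}
	fmt.Println(args)
	// [--data-checksums --encoding=UTF8 --waldir=/pgdata/pg12_wal --encryption-key-command=echo test]
}
```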
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -394,7 +383,7 @@ func TestDynamicConfiguration(t *testing.T) { }, }, { - name: "postgresql.parameters: mandatory shared_preload_libraries bad type", + name: "postgresql.parameters: mandatory shared_preload_libraries wrong-type is ignored", input: map[string]any{ "postgresql": map[string]any{ "parameters": map[string]any{ @@ -420,6 +409,33 @@ func TestDynamicConfiguration(t *testing.T) { }, }, }, + { + name: "postgresql.parameters: shared_preload_libraries order", + input: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "shared_preload_libraries": "given, citus, more", + }, + }, + }, + params: postgres.Parameters{ + Mandatory: parameters(map[string]string{ + "shared_preload_libraries": "mandatory", + }), + }, + expected: map[string]any{ + "loop_wait": int32(10), + "ttl": int32(30), + "postgresql": map[string]any{ + "parameters": map[string]any{ + "shared_preload_libraries": "citus,mandatory,given, citus, more", + }, + "pg_hba": []string{}, + "use_pg_rewind": true, + "use_slots": false, + }, + }, + }, { name: "postgresql.pg_hba: wrong-type is ignored", input: map[string]any{ @@ -689,20 +705,29 @@ func TestDynamicConfiguration(t *testing.T) { }, }, { - name: "pg version 10", + name: "tde enabled", cluster: &v1beta1.PostgresCluster{ Spec: v1beta1.PostgresClusterSpec{ - PostgresVersion: 10, + Patroni: &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "encryption_key_command": "echo test", + }, + }, + }, + }, }, }, expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ + "bin_name": map[string]any{"pg_rewind": string("/tmp/pg_rewind_tde.sh")}, "parameters": map[string]any{}, "pg_hba": []string{}, - "use_pg_rewind": false, - "use_slots": false, + "use_pg_rewind": bool(true), + "use_slots": bool(false), }, }, }, @@ -888,6 +913,41 @@ postgresql: restapi: {} tags: {} `, "\t\n")+"\n") + + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "encryption_key_command": "echo test", + }, + }, + }, + } + + datawithTDE, err := instanceYAML(cluster, instance, nil) + assert.NilError(t, err) + assert.Equal(t, datawithTDE, strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. +bootstrap: + initdb: + - data-checksums + - encoding=UTF8 + - waldir=/pgdata/pg12_wal + - encryption-key-command=echo test + method: initdb +kubernetes: {} +postgresql: + basebackup: + - waldir=/pgdata/pg12_wal + create_replica_methods: + - basebackup + pgpass: /tmp/.pgpass + use_unix_socket: true +restapi: {} +tags: {} + `, "\t\n")+"\n") + } func TestPGBackRestCreateReplicaCommand(t *testing.T) { diff --git a/internal/patroni/doc.go b/internal/patroni/doc.go index b181eab9e1..500305406d 100644 --- a/internal/patroni/doc.go +++ b/internal/patroni/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package patroni provides clients, utilities and resources for configuring and // interacting with Patroni inside of a PostgreSQL cluster diff --git a/internal/patroni/rbac.go b/internal/patroni/rbac.go index d539f60590..dcf3f18cea 100644 --- a/internal/patroni/rbac.go +++ b/internal/patroni/rbac.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -23,25 +12,25 @@ import ( ) // "list", "patch", and "watch" are required. Include "get" for good measure. -// +kubebuilder:rbac:namespace=patroni,groups="",resources="pods",verbs={get} -// +kubebuilder:rbac:namespace=patroni,groups="",resources="pods",verbs={list,watch} -// +kubebuilder:rbac:namespace=patroni,groups="",resources="pods",verbs={patch} +// +kubebuilder:rbac:groups="",resources="pods",verbs={get} +// +kubebuilder:rbac:groups="",resources="pods",verbs={list,watch} +// +kubebuilder:rbac:groups="",resources="pods",verbs={patch} // TODO(cbandy): Separate these so that one can choose ConfigMap over Endpoints. // When using Endpoints for DCS, "create", "list", "patch", and "watch" are // required. Include "get" for good measure. The `patronictl scaffold` and // `patronictl remove` commands require "deletecollection". -// +kubebuilder:rbac:namespace=patroni,groups="",resources="endpoints",verbs={get} -// +kubebuilder:rbac:namespace=patroni,groups="",resources="endpoints",verbs={create,deletecollection} -// +kubebuilder:rbac:namespace=patroni,groups="",resources="endpoints",verbs={list,watch} -// +kubebuilder:rbac:namespace=patroni,groups="",resources="endpoints",verbs={patch} -// +kubebuilder:rbac:namespace=patroni,groups="",resources="services",verbs={create} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={get} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={create,deletecollection} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={list,watch} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={patch} +// +kubebuilder:rbac:groups="",resources="services",verbs={create} // The OpenShift RestrictedEndpointsAdmission plugin requires special // authorization to create Endpoints that contain Pod IPs. 
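Dropping `namespace=patroni` from these markers should move the generated rules out of a namespaced Role and into the ClusterRole output, which is how `controller-gen` treats the `namespace` argument. For orientation, roughly the pod rule the four pod markers collapse into, expressed as the `rbacv1` type that `Permissions` builds from (the exact grouping is up to the generator):

```go
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	// Approximately what controller-gen emits for the pod markers above.
	rule := rbacv1.PolicyRule{
		APIGroups: []string{""},
		Resources: []string{"pods"},
		Verbs:     []string{"get", "list", "patch", "watch"},
	}
	fmt.Printf("%+v\n", rule)
}
```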
// - https://github.com/openshift/origin/pull/9383 -// +kubebuilder:rbac:namespace=patroni,groups="",resources="endpoints/restricted",verbs={create} +// +kubebuilder:rbac:groups="",resources="endpoints/restricted",verbs={create} // Permissions returns the RBAC rules Patroni needs for cluster. func Permissions(cluster *v1beta1.PostgresCluster) []rbacv1.PolicyRule { diff --git a/internal/patroni/rbac_test.go b/internal/patroni/rbac_test.go index 6d4f684ef4..39a8dff245 100644 --- a/internal/patroni/rbac_test.go +++ b/internal/patroni/rbac_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni diff --git a/internal/patroni/reconcile.go b/internal/patroni/reconcile.go index 65cba23387..4fbb08b67d 100644 --- a/internal/patroni/reconcile.go +++ b/internal/patroni/reconcile.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -46,7 +35,7 @@ func ClusterConfigMap(ctx context.Context, ) error { var err error - initialize.StringMap(&outClusterConfigMap.Data) + initialize.Map(&outClusterConfigMap.Data) outClusterConfigMap.Data[configMapFileKey], err = clusterYAML(inCluster, inHBAs, inParameters) @@ -62,7 +51,7 @@ func InstanceConfigMap(ctx context.Context, ) error { var err error - initialize.StringMap(&outInstanceConfigMap.Data) + initialize.Map(&outInstanceConfigMap.Data) command := pgbackrest.ReplicaCreateCommand(inCluster, inInstanceSpec) @@ -77,7 +66,7 @@ func InstanceCertificates(ctx context.Context, inRoot pki.Certificate, inDNS pki.Certificate, inDNSKey pki.PrivateKey, outInstanceCertificates *corev1.Secret, ) error { - initialize.ByteMap(&outInstanceCertificates.Data) + initialize.Map(&outInstanceCertificates.Data) var err error outInstanceCertificates.Data[certAuthorityFileKey], err = certFile(inRoot) @@ -181,6 +170,24 @@ func instanceProbes(cluster *v1beta1.PostgresCluster, container *corev1.Containe } } +// PodIsPrimary returns whether or not pod is currently acting as the leader with +// the "master" role. 
This role will be called "primary" in the future, see: +// - https://github.com/zalando/patroni/blob/master/docs/releases.rst?plain=1#L213 +func PodIsPrimary(pod metav1.Object) bool { + if pod == nil { + return false + } + + // TODO(cbandy): This works only when using Kubernetes for DCS. + + // - https://github.com/zalando/patroni/blob/v3.1.1/patroni/ha.py#L296 + // - https://github.com/zalando/patroni/blob/v3.1.1/patroni/ha.py#L583 + // - https://github.com/zalando/patroni/blob/v3.1.1/patroni/ha.py#L782 + // - https://github.com/zalando/patroni/blob/v3.1.1/patroni/ha.py#L1574 + status := pod.GetAnnotations()["status"] + return strings.Contains(status, `"role":"master"`) +} + // PodIsStandbyLeader returns whether or not pod is currently acting as a "standby_leader". func PodIsStandbyLeader(pod metav1.Object) bool { if pod == nil { diff --git a/internal/patroni/reconcile_test.go b/internal/patroni/reconcile_test.go index e2e172ea9f..5d2a2c0ad5 100644 --- a/internal/patroni/reconcile_test.go +++ b/internal/patroni/reconcile_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -132,7 +121,7 @@ func TestInstancePod(t *testing.T) { cluster.Spec.ImagePullPolicy = corev1.PullAlways clusterConfigMap := new(corev1.ConfigMap) clusterPodService := new(corev1.Service) - instanceCertficates := new(corev1.Secret) + instanceCertificates := new(corev1.Secret) instanceConfigMap := new(corev1.ConfigMap) instanceSpec := new(v1beta1.PostgresInstanceSetSpec) patroniLeaderService := new(corev1.Service) @@ -142,7 +131,7 @@ func TestInstancePod(t *testing.T) { call := func() error { return InstancePod(context.Background(), cluster, clusterConfigMap, clusterPodService, patroniLeaderService, - instanceSpec, instanceCertficates, instanceConfigMap, template) + instanceSpec, instanceCertificates, instanceConfigMap, template) } assert.NilError(t, call()) @@ -231,6 +220,31 @@ volumes: `)) } +func TestPodIsPrimary(t *testing.T) { + // No object + assert.Assert(t, !PodIsPrimary(nil)) + + // No annotations + pod := &corev1.Pod{} + assert.Assert(t, !PodIsPrimary(pod)) + + // No role + pod.Annotations = map[string]string{"status": `{}`} + assert.Assert(t, !PodIsPrimary(pod)) + + // Replica + pod.Annotations["status"] = `{"role":"replica"}` + assert.Assert(t, !PodIsPrimary(pod)) + + // Standby leader + pod.Annotations["status"] = `{"role":"standby_leader"}` + assert.Assert(t, !PodIsPrimary(pod)) + + // Primary + pod.Annotations["status"] = `{"role":"master"}` + assert.Assert(t, PodIsPrimary(pod)) +} + func TestPodIsStandbyLeader(t *testing.T) { // No object assert.Assert(t, !PodIsStandbyLeader(nil)) diff --git a/internal/pgadmin/config.go b/internal/pgadmin/config.go index ad836ee422..553a90f656 100644 --- a/internal/pgadmin/config.go +++ b/internal/pgadmin/config.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. 
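`PodIsPrimary` substring-matches the serialized annotation, which is cheap and consistent with the neighboring `PodIsStandbyLeader`. A stricter variant would decode the JSON first; the following is an alternative sketch, not what the operator ships:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// podIsPrimaryStrict decodes Patroni's "status" annotation instead of
// substring-matching it. Unknown fields are ignored by encoding/json;
// a missing or malformed annotation simply reports false.
func podIsPrimaryStrict(annotations map[string]string) bool {
	var status struct {
		Role string `json:"role"`
	}
	if err := json.Unmarshal([]byte(annotations["status"]), &status); err != nil {
		return false
	}
	return status.Role == "master"
}

func main() {
	fmt.Println(podIsPrimaryStrict(map[string]string{"status": `{"role":"master"}`}))  // true
	fmt.Println(podIsPrimaryStrict(map[string]string{"status": `{"role":"replica"}`})) // false
}
```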
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgadmin @@ -151,7 +140,7 @@ func startupCommand() []string { import glob, json, re, os DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} with open('` + settingsAbsolutePath + `') as _f: - _conf, _data = re.compile(r'[A-Z_]+'), json.load(_f) + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) if type(_data) is dict: globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) if os.path.isfile('` + ldapPasswordAbsolutePath + `'): diff --git a/internal/pgadmin/config_test.go b/internal/pgadmin/config_test.go index 931a8a48c4..87cd7847c2 100644 --- a/internal/pgadmin/config_test.go +++ b/internal/pgadmin/config_test.go @@ -1,3 +1,7 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + package pgadmin import ( @@ -55,7 +59,7 @@ func TestStartupCommand(t *testing.T) { import glob, json, re, os DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} with open('/etc/pgadmin/conf.d/~postgres-operator/pgadmin.json') as _f: - _conf, _data = re.compile(r'[A-Z_]+'), json.load(_f) + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) if type(_data) is dict: globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) if os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password'): diff --git a/internal/pgadmin/reconcile.go b/internal/pgadmin/reconcile.go index 7335b10b67..af62c482f2 100644 --- a/internal/pgadmin/reconcile.go +++ b/internal/pgadmin/reconcile.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgadmin @@ -43,8 +32,6 @@ RED="\033[0;31m" GREEN="\033[0;32m" RESET="\033[0m" -CRUNCHY_DIR=${CRUNCHY_DIR:-'/opt/crunchy'} - function enable_debugging() { if [[ ${CRUNCHY_DEBUG:-false} == "true" ]] then @@ -130,8 +117,6 @@ then err_check "$?" "pgAdmin4 Database Setup" "Could not create pgAdmin4 database: \n$(cat /tmp/pgadmin4.stderr)" fi -cd ${PGADMIN_DIR?} - echo_info "Starting Apache web server.." /usr/sbin/httpd -D FOREGROUND & echo $! 
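Widening the character class to `[A-Z_0-9]+` lets settings with digits in their names (pgAdmin's `OAUTH2_*` family, for instance) survive the full-match filter. The embedded script is Python, but the effect is easy to check in Go; note that Go's `regexp` needs explicit anchors where Python's `fullmatch` implies them:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	before := regexp.MustCompile(`^[A-Z_]+$`)
	after := regexp.MustCompile(`^[A-Z_0-9]+$`)

	for _, name := range []string{"MASTER_PASSWORD_REQUIRED", "OAUTH2_CONFIG"} {
		fmt.Printf("%s: before=%v after=%v\n",
			name, before.MatchString(name), after.MatchString(name))
	}
	// MASTER_PASSWORD_REQUIRED: before=true after=true
	// OAUTH2_CONFIG: before=false after=true
}
```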
> $APACHE_PIDFILE @@ -148,7 +133,7 @@ func ConfigMap( return nil } - initialize.StringMap(&outConfigMap.Data) + initialize.Map(&outConfigMap.Data) // To avoid spurious reconciles, the following value must not change when // the spec does not change. [json.Encoder] and [json.Marshal] do this by diff --git a/internal/pgadmin/reconcile_test.go b/internal/pgadmin/reconcile_test.go index 9dc2095cb1..f91a9b807f 100644 --- a/internal/pgadmin/reconcile_test.go +++ b/internal/pgadmin/reconcile_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgadmin @@ -117,8 +106,6 @@ containers: GREEN="\033[0;32m" RESET="\033[0m" - CRUNCHY_DIR=${CRUNCHY_DIR:-'/opt/crunchy'} - function enable_debugging() { if [[ ${CRUNCHY_DEBUG:-false} == "true" ]] then @@ -204,8 +191,6 @@ containers: err_check "$?" "pgAdmin4 Database Setup" "Could not create pgAdmin4 database: \n$(cat /tmp/pgadmin4.stderr)" fi - cd ${PGADMIN_DIR?} - echo_info "Starting Apache web server.." /usr/sbin/httpd -D FOREGROUND & echo $! > $APACHE_PIDFILE @@ -244,6 +229,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgadmin name: pgadmin-startup @@ -268,7 +255,7 @@ initContainers: import glob, json, re, os DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} with open('/etc/pgadmin/conf.d/~postgres-operator/pgadmin.json') as _f: - _conf, _data = re.compile(r'[A-Z_]+'), json.load(_f) + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) if type(_data) is dict: globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) if os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password'): @@ -284,6 +271,8 @@ initContainers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgadmin name: pgadmin-startup @@ -351,8 +340,6 @@ containers: GREEN="\033[0;32m" RESET="\033[0m" - CRUNCHY_DIR=${CRUNCHY_DIR:-'/opt/crunchy'} - function enable_debugging() { if [[ ${CRUNCHY_DEBUG:-false} == "true" ]] then @@ -438,8 +425,6 @@ containers: err_check "$?" "pgAdmin4 Database Setup" "Could not create pgAdmin4 database: \n$(cat /tmp/pgadmin4.stderr)" fi - cd ${PGADMIN_DIR?} - echo_info "Starting Apache web server.." /usr/sbin/httpd -D FOREGROUND & echo $! 
> $APACHE_PIDFILE @@ -482,6 +467,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgadmin name: pgadmin-startup @@ -506,7 +493,7 @@ initContainers: import glob, json, re, os DEFAULT_BINARY_PATHS = {'pg': sorted([''] + glob.glob('/usr/pgsql-*/bin')).pop()} with open('/etc/pgadmin/conf.d/~postgres-operator/pgadmin.json') as _f: - _conf, _data = re.compile(r'[A-Z_]+'), json.load(_f) + _conf, _data = re.compile(r'[A-Z_0-9]+'), json.load(_f) if type(_data) is dict: globals().update({k: v for k, v in _data.items() if _conf.fullmatch(k)}) if os.path.isfile('/etc/pgadmin/conf.d/~postgres-operator/ldap-bind-password'): @@ -526,6 +513,8 @@ initContainers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgadmin name: pgadmin-startup diff --git a/internal/pgadmin/users.go b/internal/pgadmin/users.go index 624594ecf3..7ce69ce211 100644 --- a/internal/pgadmin/users.go +++ b/internal/pgadmin/users.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgadmin diff --git a/internal/pgadmin/users_test.go b/internal/pgadmin/users_test.go index 7f9e98e428..69619667af 100644 --- a/internal/pgadmin/users_test.go +++ b/internal/pgadmin/users_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgadmin diff --git a/internal/pgaudit/postgres.go b/internal/pgaudit/postgres.go index 7ba6e514a7..07867d020e 100644 --- a/internal/pgaudit/postgres.go +++ b/internal/pgaudit/postgres.go @@ -1,23 +1,11 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
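The expected manifests now carry `seccompProfile: {type: RuntimeDefault}` in each container's securityContext. In Go terms that corresponds to the following `corev1` fields; a sketch of the security context these tests assert:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	sc := &corev1.SecurityContext{
		Privileged:             boolPtr(false),
		ReadOnlyRootFilesystem: boolPtr(true),
		RunAsNonRoot:           boolPtr(true),
		SeccompProfile: &corev1.SeccompProfile{
			// RuntimeDefault applies the container runtime's default
			// seccomp filter rather than running unconfined.
			Type: corev1.SeccompProfileTypeRuntimeDefault,
		},
	}
	fmt.Println(sc.SeccompProfile.Type) // RuntimeDefault
}

func boolPtr(b bool) *bool { return &b }
```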
-*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgaudit import ( "context" - "strings" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/postgres" @@ -67,7 +55,5 @@ func PostgreSQLParameters(outParameters *postgres.Parameters) { // PostgreSQL must be restarted when changing this value. // - https://github.com/pgaudit/pgaudit#settings // - https://www.postgresql.org/docs/current/runtime-config-client.html - shared := outParameters.Mandatory.Value("shared_preload_libraries") - outParameters.Mandatory.Add("shared_preload_libraries", - strings.TrimPrefix(shared+",pgaudit", ",")) + outParameters.Mandatory.AppendToList("shared_preload_libraries", "pgaudit") } diff --git a/internal/pgaudit/postgres_test.go b/internal/pgaudit/postgres_test.go index 025c815414..3734e511f0 100644 --- a/internal/pgaudit/postgres_test.go +++ b/internal/pgaudit/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgaudit diff --git a/internal/pgbackrest/certificates.go b/internal/pgbackrest/certificates.go index 9336434672..bb2633dfe7 100644 --- a/internal/pgbackrest/certificates.go +++ b/internal/pgbackrest/certificates.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbackrest/certificates.md b/internal/pgbackrest/certificates.md index 5152cdba82..344616486b 100644 --- a/internal/pgbackrest/certificates.md +++ b/internal/pgbackrest/certificates.md @@ -1,16 +1,7 @@ Server diff --git a/internal/pgbackrest/certificates_test.go b/internal/pgbackrest/certificates_test.go index 22defcc50f..4ef41b2879 100644 --- a/internal/pgbackrest/certificates_test.go +++ b/internal/pgbackrest/certificates_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
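The `AppendToList` helper replaces the manual `strings.TrimPrefix` dance the old pgAudit code did by hand. Judging from the code it replaces, the behavior is: append to the comma-separated value, with no leading comma when the list starts out empty. A sketch of equivalent logic (the real helper lives on the `postgres` parameter set):

```go
package main

import (
	"fmt"
	"strings"
)

// appendToList mimics what the removed code did by hand: add an element
// to a comma-separated parameter value, trimming the stray comma that
// appears when the list was previously empty.
func appendToList(current, element string) string {
	return strings.TrimPrefix(current+","+element, ",")
}

func main() {
	fmt.Println(appendToList("", "pgaudit"))      // pgaudit
	fmt.Println(appendToList("citus", "pgaudit")) // citus,pgaudit
}
```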
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go index 605b0a17d3..f50b2690ee 100644 --- a/internal/pgbackrest/config.go +++ b/internal/pgbackrest/config.go @@ -1,23 +1,13 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest import ( "context" "fmt" + "strconv" "strings" corev1 "k8s.io/api/core/v1" @@ -98,33 +88,37 @@ func CreatePGBackRestConfigMapIntent(postgresCluster *v1beta1.PostgresCluster, } // create an empty map for the config data - initialize.StringMap(&cm.Data) + initialize.Map(&cm.Data) - addDedicatedHost := DedicatedRepoHostEnabled(postgresCluster) pgdataDir := postgres.DataDirectory(postgresCluster) // Port will always be populated, since the API will set a default of 5432 if not provided pgPort := *postgresCluster.Spec.Port cm.Data[CMInstanceKey] = iniGeneratedWarning + populatePGInstanceConfigurationMap( - serviceName, serviceNamespace, repoHostName, - pgdataDir, pgPort, postgresCluster.Spec.Backups.PGBackRest.Repos, + serviceName, serviceNamespace, repoHostName, pgdataDir, + config.FetchKeyCommand(&postgresCluster.Spec), + strconv.Itoa(postgresCluster.Spec.PostgresVersion), + pgPort, postgresCluster.Spec.Backups.PGBackRest.Repos, postgresCluster.Spec.Backups.PGBackRest.Global, ).String() - // As the cluster transitions from having a repository host to having none, // PostgreSQL instances that have not rolled out expect to mount a server // config file. Always populate that file so those volumes stay valid and - // Kubernetes propagates their contents to those pods. + // Kubernetes propagates their contents to those pods. The repo host name + // given below should always be set, but this guards for cases when it might + // not be. 
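`FetchKeyCommand` is threaded from the cluster spec into both generated INI files here, and the same command is later spliced into the temporary `postgres.restore.conf` that `RestoreCommand` writes (see the next hunk). A sketch of that GUC fragment; the expected text matches `TestRestoreCommandTDE` further down:

```go
package main

import "fmt"

func main() {
	fetchKeyCommand := "echo testValue" // whatever FetchKeyCommand found in the spec

	// Mirrors the ekc construction in RestoreCommand: an extra GUC line
	// is appended only when a fetch key command is configured.
	var ekc string
	if fetchKeyCommand != "" {
		ekc = "\nencryption_key_command = '" + fetchKeyCommand + "'"
	}

	fmt.Println("unix_socket_directories = '/tmp'" + ekc)
	// unix_socket_directories = '/tmp'
	// encryption_key_command = 'echo testValue'
}
```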
cm.Data[serverConfigMapKey] = "" - if addDedicatedHost && repoHostName != "" { + if repoHostName != "" { cm.Data[serverConfigMapKey] = iniGeneratedWarning + serverConfig(postgresCluster).String() cm.Data[CMRepoKey] = iniGeneratedWarning + populateRepoHostConfigurationMap( serviceName, serviceNamespace, - pgdataDir, pgPort, instanceNames, + pgdataDir, config.FetchKeyCommand(&postgresCluster.Spec), + strconv.Itoa(postgresCluster.Spec.PostgresVersion), + pgPort, instanceNames, postgresCluster.Spec.Backups.PGBackRest.Repos, postgresCluster.Spec.Backups.PGBackRest.Global, ).String() @@ -177,7 +171,7 @@ func MakePGBackrestLogDir(template *corev1.PodTemplateSpec, // - Renames the data directory as needed to bootstrap the cluster using the restored database. // This ensures compatibility with the "existing" bootstrap method that is included in the // Patroni config when bootstrapping a cluster using an existing data directory. -func RestoreCommand(pgdata, hugePagesSetting string, tablespaceVolumes []*corev1.PersistentVolumeClaim, args ...string) []string { +func RestoreCommand(pgdata, hugePagesSetting, fetchKeyCommand string, tablespaceVolumes []*corev1.PersistentVolumeClaim, args ...string) []string { // After pgBackRest restores files, PostgreSQL starts in recovery to finish // replaying WAL files. "hot_standby" is "on" (by default) so we can detect @@ -212,6 +206,14 @@ func RestoreCommand(pgdata, hugePagesSetting string, tablespaceVolumes []*corev1 tablespaceVolume.Labels[naming.LabelData]) } + // If the fetch key command is not empty, save the GUC variable and value + // to a new string. + var ekc string + if fetchKeyCommand != "" { + ekc = ` +encryption_key_command = '` + fetchKeyCommand + `'` + } + restoreScript := `declare -r pgdata="$1" opts="$2" install --directory --mode=0700 "${pgdata}"` + tablespaceCmd + ` rm -f "${pgdata}/postmaster.pid" @@ -219,8 +221,8 @@ bash -xc "pgbackrest restore ${opts}" rm -f "${pgdata}/patroni.dynamic.json" export PGDATA="${pgdata}" PGHOST='/tmp' -until [ "${recovery=}" = 'f' ]; do -if [ -z "${recovery}" ]; then +until [[ "${recovery=}" == 'f' ]]; do +if [[ -z "${recovery}" ]]; then control=$(pg_controldata) read -r max_conn <<< "${control##*max_connections setting:}" read -r max_lock <<< "${control##*max_locks_per_xact setting:}" @@ -235,10 +237,12 @@ max_connections = '${max_conn}' max_locks_per_transaction = '${max_lock}' max_prepared_transactions = '${max_ptxn}' max_worker_processes = '${max_work}' -unix_socket_directories = '/tmp' +unix_socket_directories = '/tmp'` + + // Add the encryption key command setting, if provided. + ekc + ` huge_pages = ` + hugePagesSetting + ` EOF -if [ "$(< "${pgdata}/PG_VERSION")" -ge 12 ]; then +if [[ "$(< "${pgdata}/PG_VERSION")" -ge 12 ]]; then read -r max_wals <<< "${control##*max_wal_senders setting:}" echo >> /tmp/postgres.restore.conf "max_wal_senders = '${max_wals}'" fi @@ -250,7 +254,7 @@ recovery=$(psql -Atc "SELECT CASE WHEN NOT pg_catalog.pg_is_in_recovery() THEN false WHEN NOT pg_catalog.pg_is_wal_replay_paused() THEN true ELSE pg_catalog.pg_wal_replay_resume()::text = '' -END recovery" && sleep 1) || true +END recovery" && sleep 1) ||: done pg_ctl stop --silent --wait --timeout=31536000 @@ -259,10 +263,47 @@ mv "${pgdata}" "${pgdata}_bootstrap"` return append([]string{"bash", "-ceu", "--", restoreScript, "-", pgdata}, args...) } +// DedicatedSnapshotVolumeRestoreCommand returns the command for performing a pgBackRest delta restore +// into a dedicated snapshot volume. 
In addition to calling the pgBackRest restore command with any +// pgBackRest options provided, the script also removes the patroni.dynamic.json file if present. This +// ensures the configuration from the cluster being restored from is not utilized when bootstrapping a +// new cluster, and the configuration for the new cluster is utilized instead. +func DedicatedSnapshotVolumeRestoreCommand(pgdata string, args ...string) []string { + + // The postmaster.pid file is removed, if it exists, before attempting a restore. + // This allows the restore to be tried more than once without the causing an + // error due to the presence of the file in subsequent attempts. + + // Wrap pgbackrest restore command in backup_label checks. If pre/post + // backup_labels are different, restore moved database forward, so return 0 + // so that the Job is successful and we know to proceed with snapshot. + // Otherwise return 1, Job will fail, and we will not proceed with snapshot. + restoreScript := `declare -r pgdata="$1" opts="$2" +BACKUP_LABEL=$([[ ! -e "${pgdata}/backup_label" ]] || md5sum "${pgdata}/backup_label") +echo "Starting pgBackRest delta restore" + +install --directory --mode=0700 "${pgdata}" +rm -f "${pgdata}/postmaster.pid" +bash -xc "pgbackrest restore ${opts}" +rm -f "${pgdata}/patroni.dynamic.json" + +BACKUP_LABEL_POST=$([[ ! -e "${pgdata}/backup_label" ]] || md5sum "${pgdata}/backup_label") +if [[ "${BACKUP_LABEL}" != "${BACKUP_LABEL_POST}" ]] +then + exit 0 +fi +echo Database was not advanced by restore. No snapshot will be taken. +echo Check that your last backup was successful. +exit 1` + + return append([]string{"bash", "-ceu", "--", restoreScript, "-", pgdata}, args...) +} + // populatePGInstanceConfigurationMap returns options representing the pgBackRest configuration for // a PostgreSQL instance func populatePGInstanceConfigurationMap( - serviceName, serviceNamespace, repoHostName, pgdataDir string, + serviceName, serviceNamespace, repoHostName, pgdataDir, + fetchKeyCommand, postgresVersion string, pgPort int32, repos []v1beta1.PGBackRestRepo, globalConfig map[string]string, ) iniSectionSet { @@ -275,6 +316,10 @@ func populatePGInstanceConfigurationMap( global := iniMultiSet{} stanza := iniMultiSet{} + // For faster and more robust WAL archiving, we turn on pgBackRest archive-async. + global.Set("archive-async", "y") + // pgBackRest spool-path should always be co-located with the Postgres WAL path. 
+ global.Set("spool-path", "/pgdata/pgbackrest-spool") // pgBackRest will log to the pgData volume for commands run on the PostgreSQL instance global.Set("log-path", naming.PGBackRestPGDataLogPath) @@ -312,6 +357,12 @@ func populatePGInstanceConfigurationMap( stanza.Set("pg1-port", fmt.Sprint(pgPort)) stanza.Set("pg1-socket-path", postgres.SocketDirectory) + if fetchKeyCommand != "" { + stanza.Set("archive-header-check", "n") + stanza.Set("page-header-check", "n") + stanza.Set("pg-version-force", postgresVersion) + } + return iniSectionSet{ "global": global, DefaultStanzaName: stanza, @@ -321,7 +372,8 @@ func populatePGInstanceConfigurationMap( // populateRepoHostConfigurationMap returns options representing the pgBackRest configuration for // a pgBackRest dedicated repository host func populateRepoHostConfigurationMap( - serviceName, serviceNamespace, pgdataDir string, + serviceName, serviceNamespace, pgdataDir, + fetchKeyCommand, postgresVersion string, pgPort int32, pgHosts []string, repos []v1beta1.PGBackRestRepo, globalConfig map[string]string, ) iniSectionSet { @@ -345,13 +397,18 @@ func populateRepoHostConfigurationMap( if !pgBackRestLogPathSet && repo.Volume != nil { // pgBackRest will log to the first configured repo volume when commands // are run on the pgBackRest repo host. With our previous check in - // DedicatedRepoHostEnabled(), we've already validated that at least one + // RepoHostVolumeDefined(), we've already validated that at least one // defined repo has a volume. global.Set("log-path", fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name)) pgBackRestLogPathSet = true } } + // If no log path was set, don't log because the default path is not writable. + if !pgBackRestLogPathSet { + global.Set("log-level-file", "off") + } + for option, val := range globalConfig { global.Set(option, val) } @@ -372,6 +429,12 @@ func populateRepoHostConfigurationMap( stanza.Set(fmt.Sprintf("pg%d-path", i+1), pgdataDir) stanza.Set(fmt.Sprintf("pg%d-port", i+1), fmt.Sprint(pgPort)) stanza.Set(fmt.Sprintf("pg%d-socket-path", i+1), postgres.SocketDirectory) + + if fetchKeyCommand != "" { + stanza.Set("archive-header-check", "n") + stanza.Set("page-header-check", "n") + stanza.Set("pg-version-force", postgresVersion) + } } return iniSectionSet{ @@ -422,21 +485,21 @@ func reloadCommand(name string) []string { // mtimes. // - https://unix.stackexchange.com/a/407383 const script = ` -exec {fd}<> <(:) +exec {fd}<> <(:||:) until read -r -t 5 -u "${fd}"; do if - [ "${filename}" -nt "/proc/self/fd/${fd}" ] && + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --dereference --format='Loaded configuration dated %y' "${filename}" elif - { [ "${directory}" -nt "/proc/self/fd/${fd}" ] || - [ "${authority}" -nt "/proc/self/fd/${fd}" ] + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] } && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi done @@ -463,7 +526,7 @@ func serverConfig(cluster *v1beta1.PostgresCluster) iniSectionSet { // // NOTE(cbandy): The unspecified IPv6 address, which ends up being the IPv6 // wildcard address, did not work in all environments. In some cases, the - // the "server-ping" command would not connect. + // "server-ping" command would not connect. 
// - https://tools.ietf.org/html/rfc3493#section-3.8 // // TODO(cbandy): When pgBackRest provides a way to bind to all addresses, diff --git a/internal/pgbackrest/config.md b/internal/pgbackrest/config.md index 2c7a19f700..2101535b3a 100644 --- a/internal/pgbackrest/config.md +++ b/internal/pgbackrest/config.md @@ -1,16 +1,7 @@ # pgBackRest Configuration Overview @@ -31,6 +22,8 @@ As shown, the settings with the `cfgSectionGlobal` designation are `log-path`: The log path provides a location for pgBackRest to store log files. +`log-level-file`: Level for file logging. Set to 'off' when the repo host has no volume. + `repo-path`: Path where backups and archive are stored. The repository is where pgBackRest stores backups and archives WAL segments. @@ -75,6 +68,7 @@ pg1-socket-path [global] log-path repo1-path +log-level-file [stanza] pg1-host diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index ba350d216f..b74bf9a4a8 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -131,6 +120,7 @@ pg1-socket-path = /tmp/postgres # Your changes will not be saved. 
[global] +archive-async = y log-path = /pgdata/pgbackrest/log repo1-host = repo-hostname-0.pod-service-name.test-ns.svc.`+domain+` repo1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt @@ -151,6 +141,7 @@ repo4-s3-bucket = s-bucket repo4-s3-endpoint = endpoint-s repo4-s3-region = earth repo4-type = s3 +spool-path = /pgdata/pgbackrest-spool [db] pg1-path = /pgdata/pg12 @@ -204,6 +195,54 @@ pg1-socket-path = /tmp/postgres "postgres-operator.crunchydata.com/pgbackrest-config": "", }) }) + + t.Run("EnabledTDE", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "encryption_key_command": "echo test", + }, + }, + }, + } + + configmap := CreatePGBackRestConfigMapIntent(cluster, + "", "number", "pod-service-name", "test-ns", + []string{"some-instance"}) + + assert.Assert(t, + strings.Contains(configmap.Data["pgbackrest_instance.conf"], + "archive-header-check = n")) + assert.Assert(t, + strings.Contains(configmap.Data["pgbackrest_instance.conf"], + "page-header-check = n")) + assert.Assert(t, + strings.Contains(configmap.Data["pgbackrest_instance.conf"], + "pg-version-force")) + + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + + configmap = CreatePGBackRestConfigMapIntent(cluster, + "repo1", "number", "pod-service-name", "test-ns", + []string{"some-instance"}) + + assert.Assert(t, + strings.Contains(configmap.Data["pgbackrest_repo.conf"], + "archive-header-check = n")) + assert.Assert(t, + strings.Contains(configmap.Data["pgbackrest_repo.conf"], + "page-header-check = n")) + assert.Assert(t, + strings.Contains(configmap.Data["pgbackrest_repo.conf"], + "pg-version-force")) + }) } func TestMakePGBackrestLogDir(t *testing.T) { @@ -297,7 +336,7 @@ func TestRestoreCommand(t *testing.T) { opts := []string{ "--stanza=" + DefaultStanzaName, "--pg1-path=" + pgdata, "--repo=1"} - command := RestoreCommand(pgdata, "try", nil, strings.Join(opts, " ")) + command := RestoreCommand(pgdata, "try", "", nil, strings.Join(opts, " ")) assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) assert.Assert(t, len(command) > 3) @@ -312,7 +351,45 @@ func TestRestoreCommand(t *testing.T) { } func TestRestoreCommandPrettyYAML(t *testing.T) { - b, err := yaml.Marshal(RestoreCommand("/dir", "try", nil, "--options")) + b, err := yaml.Marshal(RestoreCommand("/dir", "try", "", nil, "--options")) + + assert.NilError(t, err) + assert.Assert(t, strings.Contains(string(b), "\n- |"), + "expected literal block scalar, got:\n%s", b) +} + +func TestRestoreCommandTDE(t *testing.T) { + b, err := yaml.Marshal(RestoreCommand("/dir", "try", "echo testValue", nil, "--options")) + + assert.NilError(t, err) + assert.Assert(t, strings.Contains(string(b), "encryption_key_command = 'echo testValue'"), + "expected encryption_key_command setting, got:\n%s", b) +} + +func TestDedicatedSnapshotVolumeRestoreCommand(t *testing.T) { + shellcheck := require.ShellCheck(t) + + pgdata := "/pgdata/pg13" + opts := []string{ + "--stanza=" + DefaultStanzaName, "--pg1-path=" + pgdata, + "--repo=1"} + command := DedicatedSnapshotVolumeRestoreCommand(pgdata, strings.Join(opts, " ")) + + assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) + assert.Assert(t, len(command) > 3) + + dir := t.TempDir() + file := filepath.Join(dir, "script.bash") + assert.NilError(t, os.WriteFile(file, 
[]byte(command[3]), 0o600)) + + cmd := exec.Command(shellcheck, "--enable=all", file) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) +} + +func TestDedicatedSnapshotVolumeRestoreCommandPrettyYAML(t *testing.T) { + b, err := yaml.Marshal(DedicatedSnapshotVolumeRestoreCommand("/dir", "--options")) + assert.NilError(t, err) assert.Assert(t, strings.Contains(string(b), "\n- |"), "expected literal block scalar, got:\n%s", b) diff --git a/internal/pgbackrest/helpers_test.go b/internal/pgbackrest/helpers_test.go deleted file mode 100644 index c5d6f0de52..0000000000 --- a/internal/pgbackrest/helpers_test.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pgbackrest - -import ( - "github.com/crunchydata/postgres-operator/internal/testing/cmp" -) - -// marshalMatches converts actual to YAML and compares that to expected. -func marshalMatches(actual interface{}, expected string) cmp.Comparison { - return cmp.MarshalMatches(actual, expected) -} diff --git a/internal/pgbackrest/iana.go b/internal/pgbackrest/iana.go index 652c3e7882..c6e2f71e6c 100644 --- a/internal/pgbackrest/iana.go +++ b/internal/pgbackrest/iana.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbackrest/options.go b/internal/pgbackrest/options.go index 470017c811..2439901e47 100644 --- a/internal/pgbackrest/options.go +++ b/internal/pgbackrest/options.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbackrest/options_test.go b/internal/pgbackrest/options_test.go index 8b46381512..374737ec7f 100644 --- a/internal/pgbackrest/options_test.go +++ b/internal/pgbackrest/options_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbackrest/pgbackrest.go b/internal/pgbackrest/pgbackrest.go index a19733c8b8..21124b9744 100644 --- a/internal/pgbackrest/pgbackrest.go +++ b/internal/pgbackrest/pgbackrest.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -20,9 +9,10 @@ import ( "context" "fmt" "io" - "strings" "github.com/pkg/errors" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) const ( @@ -30,9 +20,9 @@ const ( // is detected while attempting stanza creation errMsgConfigHashMismatch = "postgres operator error: pgBackRest config hash mismatch" - // errMsgBackupDbMismatch is the error message returned from pgBackRest when PG versions - // or PG system identifiers do not match between the PG instance and the existing stanza - errMsgBackupDbMismatch = "backup and archive info files exist but do not match the database" + // errMsgStaleReposWithVolumesConfig is the error message displayed when a volume-backed repo has been + // configured, but the configuration has not yet propagated into the container. + errMsgStaleReposWithVolumesConfig = "postgres operator error: pgBackRest stale volume-backed repo configuration" ) // Executor calls "pgbackrest" commands @@ -51,31 +41,50 @@ type Executor func( // from running (with a config mismatch indicating that the pgBackRest configuration as stored in // the cluster's pgBackRest ConfigMap has not yet propagated to the Pod). 
func (exec Executor) StanzaCreateOrUpgrade(ctx context.Context, configHash string, - upgrade bool) (bool, error) { + postgresCluster *v1beta1.PostgresCluster) (bool, error) { var stdout, stderr bytes.Buffer - stanzaCmd := "create" - if upgrade { - stanzaCmd = "upgrade" + var reposWithVolumes []v1beta1.PGBackRestRepo + for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { + if repo.Volume != nil { + reposWithVolumes = append(reposWithVolumes, repo) + } + } + + grep := "grep %s-path /etc/pgbackrest/conf.d/pgbackrest_instance.conf" + + var checkRepoCmd string + if len(reposWithVolumes) > 0 { + repo := reposWithVolumes[0] + checkRepoCmd = checkRepoCmd + fmt.Sprintf(grep, repo.Name) + + reposWithVolumes = reposWithVolumes[1:] + for _, repo := range reposWithVolumes { + checkRepoCmd = checkRepoCmd + fmt.Sprintf(" && "+grep, repo.Name) + } } // this is the script that is run to create a stanza. First it checks the // "config-hash" file to ensure all configuration changes (e.g. from ConfigMaps) have // propagated to the container, and if not, it prints an error and returns with exit code 1. + // Next, it checks that any volume-backed repo added to the config has propagated into + // the container, and if not, prints an error and exits with code 1. - // Otherwise, it runs the pgbackrest command, which will either be "stanza-create" or - // "stanza-upgrade", depending on the value of the boolean "upgrade" parameter. + // Otherwise, it runs "stanza-create" and, if that fails, falls back to + // "stanza-upgrade". const script = ` -declare -r hash="$1" stanza="$2" message="$3" cmd="$4" +declare -r hash="$1" stanza="$2" hash_msg="$3" vol_msg="$4" check_repo_cmd="$5" if [[ "$(< /etc/pgbackrest/conf.d/config-hash)" != "${hash}" ]]; then - printf >&2 "%s" "${message}"; exit 1; + printf >&2 "%s" "${hash_msg}"; exit 1; +elif ! bash -c "${check_repo_cmd}"; then + printf >&2 "%s" "${vol_msg}"; exit 1; else - pgbackrest "${cmd}" --stanza="${stanza}" + pgbackrest stanza-create --stanza="${stanza}" || pgbackrest stanza-upgrade --stanza="${stanza}" fi ` if err := exec(ctx, nil, &stdout, &stderr, "bash", "-ceu", "--", - script, "-", configHash, DefaultStanzaName, errMsgConfigHashMismatch, - fmt.Sprintf("stanza-%s", stanzaCmd)); err != nil { + script, "-", configHash, DefaultStanzaName, errMsgConfigHashMismatch, errMsgStaleReposWithVolumesConfig, + checkRepoCmd); err != nil { errReturn := stderr.String() @@ -86,10 +95,10 @@ fi return true, nil } - // if the err returned from pgbackrest command is about a version mismatch - // then we should run upgrade rather than create - if strings.Contains(errReturn, errMsgBackupDbMismatch) { - return exec.StanzaCreateOrUpgrade(ctx, configHash, true) + // if the configuration for volume-backed repositories is stale, return true without an error, since this + // is expected while waiting for config changes in ConfigMaps to reach the container + if errReturn == errMsgStaleReposWithVolumesConfig { + return true, nil } // if none of the above errors, return the err diff --git a/internal/pgbackrest/pgbackrest_test.go b/internal/pgbackrest/pgbackrest_test.go index 8ae3c868b1..33c97913cf 100644 --- a/internal/pgbackrest/pgbackrest_test.go +++ b/internal/pgbackrest/pgbackrest_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License.
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -24,8 +13,13 @@ import ( "testing" "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/api/resource" + + corev1 "k8s.io/api/core/v1" "github.com/crunchydata/postgres-operator/internal/testing/require" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestStanzaCreateOrUpgrade(t *testing.T) { @@ -34,15 +28,19 @@ func TestStanzaCreateOrUpgrade(t *testing.T) { ctx := context.Background() configHash := "7f5d4d5bdc" expectedCommand := []string{"bash", "-ceu", "--", ` -declare -r hash="$1" stanza="$2" message="$3" cmd="$4" +declare -r hash="$1" stanza="$2" hash_msg="$3" vol_msg="$4" check_repo_cmd="$5" if [[ "$(< /etc/pgbackrest/conf.d/config-hash)" != "${hash}" ]]; then - printf >&2 "%s" "${message}"; exit 1; + printf >&2 "%s" "${hash_msg}"; exit 1; +elif ! bash -c "${check_repo_cmd}"; then + printf >&2 "%s" "${vol_msg}"; exit 1; else - pgbackrest "${cmd}" --stanza="${stanza}" + pgbackrest stanza-create --stanza="${stanza}" || pgbackrest stanza-upgrade --stanza="${stanza}" fi `, "-", "7f5d4d5bdc", "db", "postgres operator error: pgBackRest config hash mismatch", - "stanza-create"} + "postgres operator error: pgBackRest stale volume-backed repo configuration", + "grep repo1-path /etc/pgbackrest/conf.d/pgbackrest_instance.conf", + } var shellCheckScript string stanzaExec := func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, @@ -56,8 +54,36 @@ fi return nil } + postgresCluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Repos: []v1beta1.PGBackRestRepo{{ + Name: "repo1", + Volume: &v1beta1.RepoPVC{ + VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, { + Name: "repo2", + S3: &v1beta1.RepoS3{ + Bucket: "bucket", + Endpoint: "endpoint", + Region: "region", + }, + }}, + }, + }, + }, + } - configHashMismatch, err := Executor(stanzaExec).StanzaCreateOrUpgrade(ctx, configHash, false) + configHashMismatch, err := Executor(stanzaExec).StanzaCreateOrUpgrade(ctx, configHash, postgresCluster) assert.NilError(t, err) assert.Assert(t, !configHashMismatch) diff --git a/internal/pgbackrest/postgres.go b/internal/pgbackrest/postgres.go index fc4c579014..ab5c71868c 100644 --- a/internal/pgbackrest/postgres.go +++ b/internal/pgbackrest/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -26,6 +15,7 @@ import ( func PostgreSQL( inCluster *v1beta1.PostgresCluster, outParameters *postgres.Parameters, + backupsEnabled bool, ) { if outParameters.Mandatory == nil { outParameters.Mandatory = postgres.NewParameterSet() @@ -38,9 +28,15 @@ func PostgreSQL( // - https://pgbackrest.org/user-guide.html#quickstart/configure-archiving // - https://pgbackrest.org/command.html#command-archive-push // - https://www.postgresql.org/docs/current/runtime-config-wal.html - archive := `pgbackrest --stanza=` + DefaultStanzaName + ` archive-push "%p"` outParameters.Mandatory.Add("archive_mode", "on") - outParameters.Mandatory.Add("archive_command", archive) + if backupsEnabled { + archive := `pgbackrest --stanza=` + DefaultStanzaName + ` archive-push "%p"` + outParameters.Mandatory.Add("archive_command", archive) + } else { + // If backups are disabled, keep archive_mode on (to avoid a Postgres restart) + // and throw away WAL. + outParameters.Mandatory.Add("archive_command", `true`) + } // archive_timeout is used to determine at what point a WAL file is switched, // if the WAL archive has not reached its full size in # of transactions diff --git a/internal/pgbackrest/postgres_test.go b/internal/pgbackrest/postgres_test.go index 207dee991a..b87b35631a 100644 --- a/internal/pgbackrest/postgres_test.go +++ b/internal/pgbackrest/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -28,7 +17,7 @@ func TestPostgreSQLParameters(t *testing.T) { cluster := new(v1beta1.PostgresCluster) parameters := new(postgres.Parameters) - PostgreSQL(cluster, parameters) + PostgreSQL(cluster, parameters, true) assert.DeepEqual(t, parameters.Mandatory.AsMap(), map[string]string{ "archive_mode": "on", "archive_command": `pgbackrest --stanza=db archive-push "%p"`, @@ -39,12 +28,19 @@ func TestPostgreSQLParameters(t *testing.T) { "archive_timeout": "60s", }) + PostgreSQL(cluster, parameters, false) + assert.DeepEqual(t, parameters.Mandatory.AsMap(), map[string]string{ + "archive_mode": "on", + "archive_command": "true", + "restore_command": `pgbackrest --stanza=db archive-get %f "%p"`, + }) + cluster.Spec.Standby = &v1beta1.PostgresStandbySpec{ Enabled: true, RepoName: "repo99", } - PostgreSQL(cluster, parameters) + PostgreSQL(cluster, parameters, true) assert.DeepEqual(t, parameters.Mandatory.AsMap(), map[string]string{ "archive_mode": "on", "archive_command": `pgbackrest --stanza=db archive-push "%p"`, diff --git a/internal/pgbackrest/rbac.go b/internal/pgbackrest/rbac.go index 490ac23a06..950f10ef8b 100644 --- a/internal/pgbackrest/rbac.go +++ b/internal/pgbackrest/rbac.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -22,8 +11,8 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// +kubebuilder:rbac:namespace=pgbackrest,groups="",resources="pods",verbs={list} -// +kubebuilder:rbac:namespace=pgbackrest,groups="",resources="pods/exec",verbs={create} +// +kubebuilder:rbac:groups="",resources="pods",verbs={list} +// +kubebuilder:rbac:groups="",resources="pods/exec",verbs={create} // Permissions returns the RBAC rules pgBackRest needs for a cluster. func Permissions(cluster *v1beta1.PostgresCluster) []rbacv1.PolicyRule { diff --git a/internal/pgbackrest/rbac_test.go b/internal/pgbackrest/rbac_test.go index 77cd705dc0..a620276f64 100644 --- a/internal/pgbackrest/rbac_test.go +++ b/internal/pgbackrest/rbac_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbackrest/reconcile.go b/internal/pgbackrest/reconcile.go index 56402dda55..d22bccc3c0 100644 --- a/internal/pgbackrest/reconcile.go +++ b/internal/pgbackrest/reconcile.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -24,11 +13,11 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -116,22 +105,15 @@ func AddConfigToInstancePod( {Key: ConfigHashKey, Path: ConfigHashKey}, } - // As the cluster transitions from having a repository host to having none, - // PostgreSQL instances that have not rolled out expect to mount client - // certificates. Specify those files are optional so the configuration - // volumes stay valid and Kubernetes propagates their contents to those pods. secret := corev1.VolumeProjection{Secret: &corev1.SecretProjection{}} secret.Secret.Name = naming.PGBackRestSecret(cluster).Name - secret.Secret.Optional = initialize.Bool(true) - if DedicatedRepoHostEnabled(cluster) { - configmap.ConfigMap.Items = append( - configmap.ConfigMap.Items, corev1.KeyToPath{ - Key: serverConfigMapKey, - Path: serverConfigProjectionPath, - }) - secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) - } + configmap.ConfigMap.Items = append( + configmap.ConfigMap.Items, corev1.KeyToPath{ + Key: serverConfigMapKey, + Path: serverConfigProjectionPath, + }) + secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) // Start with a copy of projections specified in the cluster. Items later in // the list take precedence over earlier items (that is, last write wins). @@ -206,18 +188,47 @@ func AddConfigToRestorePod( sources := append([]corev1.VolumeProjection{}, cluster.Spec.Backups.PGBackRest.Configuration...) - if cluster.Spec.DataSource != nil && - cluster.Spec.DataSource.PGBackRest != nil && - cluster.Spec.DataSource.PGBackRest.Configuration != nil { - sources = append(sources, cluster.Spec.DataSource.PGBackRest.Configuration...) - } - // For a PostgresCluster restore, append all pgBackRest configuration from - // the source cluster for the restore + // the source cluster for the restore. if sourceCluster != nil { sources = append(sources, sourceCluster.Spec.Backups.PGBackRest.Configuration...) } + // Currently the spec accepts a dataSource with both a PostgresCluster and + // a PGBackRest section. 
In that case only the PostgresCluster is honored (see + // internal/controller/postgrescluster/cluster.go, reconcileDataSource). + // + // `sourceCluster` is always nil for a cloud-based restore (see + // internal/controller/postgrescluster/pgbackrest.go, reconcileCloudBasedDataSource). + // + // So, if `sourceCluster` is nil and `DataSource.PGBackRest` is not, + // this is a cloud-based dataSource restore, and only the configuration from + // the `dataSource.pgbackrest` section should be included. + if sourceCluster == nil && + cluster.Spec.DataSource != nil && + cluster.Spec.DataSource.PGBackRest != nil { + + sources = append([]corev1.VolumeProjection{}, + cluster.Spec.DataSource.PGBackRest.Configuration...) + } + + // mount any provided configuration files to the restore Job Pod + if len(cluster.Spec.Config.Files) != 0 { + additionalConfigVolumeMount := postgres.AdditionalConfigVolumeMount() + additionalConfigVolume := corev1.Volume{Name: additionalConfigVolumeMount.Name} + additionalConfigVolume.Projected = &corev1.ProjectedVolumeSource{ + Sources: append(sources, cluster.Spec.Config.Files...), + } + for i := range pod.Containers { + container := &pod.Containers[i] + + if container.Name == naming.PGBackRestRestoreContainerName { + container.VolumeMounts = append(container.VolumeMounts, additionalConfigVolumeMount) + } + } + pod.Volumes = append(pod.Volumes, additionalConfigVolume) + } + addConfigVolumeAndMounts(pod, append(sources, configmap, secret)) } @@ -260,6 +271,7 @@ func addConfigVolumeAndMounts( // addServerContainerAndVolume adds the TLS server container and certificate // projections to pod. Any PostgreSQL data and WAL volumes in pod are also mounted. func addServerContainerAndVolume( + ctx context.Context, cluster *v1beta1.PostgresCluster, pod *corev1.PodSpec, certificates []corev1.VolumeProjection, resources *corev1.ResourceRequirements, ) { @@ -303,7 +315,7 @@ func addServerContainerAndVolume( postgres.DataVolumeMount().Name: postgres.DataVolumeMount(), postgres.WALVolumeMount().Name: postgres.WALVolumeMount(), } - if util.DefaultMutableFeatureGate.Enabled(util.TablespaceVolumes) { + if feature.Enabled(ctx, feature.TablespaceVolumes) { for _, instance := range cluster.Spec.InstanceSets { for _, vol := range instance.TablespaceVolumes { tablespaceVolumeMount := postgres.TablespaceVolumeMount(vol.Name) @@ -341,6 +353,7 @@ func addServerContainerAndVolume( // AddServerToInstancePod adds the TLS server container and volume to pod for // an instance of cluster. Any PostgreSQL volumes must already be in pod. func AddServerToInstancePod( + ctx context.Context, cluster *v1beta1.PostgresCluster, pod *corev1.PodSpec, instanceCertificateSecretName string, ) { @@ -358,12 +371,13 @@ func AddServerToInstancePod( resources = sidecars.PGBackRest.Resources } - addServerContainerAndVolume(cluster, pod, certificates, resources) + addServerContainerAndVolume(ctx, cluster, pod, certificates, resources) } // AddServerToRepoPod adds the TLS server container and volume to pod for // the dedicated repository host of cluster.
func AddServerToRepoPod( + ctx context.Context, cluster *v1beta1.PostgresCluster, pod *corev1.PodSpec, ) { certificates := []corev1.VolumeProjection{{ @@ -380,7 +394,7 @@ func AddServerToRepoPod( resources = &cluster.Spec.Backups.PGBackRest.RepoHost.Resources } - addServerContainerAndVolume(cluster, pod, certificates, resources) + addServerContainerAndVolume(ctx, cluster, pod, certificates, resources) } // InstanceCertificates populates the shared Secret with certificates needed to run pgBackRest. @@ -392,15 +406,13 @@ func InstanceCertificates(ctx context.Context, ) error { var err error - if DedicatedRepoHostEnabled(inCluster) { - initialize.ByteMap(&outInstanceCertificates.Data) + initialize.Map(&outInstanceCertificates.Data) - if err == nil { - outInstanceCertificates.Data[certInstanceSecretKey], err = certFile(inDNS) - } - if err == nil { - outInstanceCertificates.Data[certInstancePrivateKeySecretKey], err = certFile(inDNSKey) - } + if err == nil { + outInstanceCertificates.Data[certInstanceSecretKey], err = certFile(inDNS) + } + if err == nil { + outInstanceCertificates.Data[certInstancePrivateKeySecretKey], err = certFile(inDNSKey) } return err @@ -461,7 +473,7 @@ func RestoreConfig( sourceConfigMap, targetConfigMap *corev1.ConfigMap, sourceSecret, targetSecret *corev1.Secret, ) { - initialize.StringMap(&targetConfigMap.Data) + initialize.Map(&targetConfigMap.Data) // Use the repository definitions from the source cluster. // @@ -473,7 +485,7 @@ func RestoreConfig( targetConfigMap.Data[CMInstanceKey] = sourceConfigMap.Data[CMInstanceKey] if sourceSecret != nil && targetSecret != nil { - initialize.ByteMap(&targetSecret.Data) + initialize.Map(&targetSecret.Data) // - https://golang.org/issue/45038 bytesClone := func(b []byte) []byte { return append([]byte(nil), b...) } @@ -497,7 +509,7 @@ func Secret(ctx context.Context, // Save the CA and generate a TLS client certificate for the entire cluster. if inRepoHost != nil { - initialize.ByteMap(&outSecret.Data) + initialize.Map(&outSecret.Data) // The server verifies its "tls-server-auth" option contains the common // name (CN) of the certificate presented by a client. The entire diff --git a/internal/pgbackrest/reconcile_test.go b/internal/pgbackrest/reconcile_test.go index 1d33f79aac..4957d58f7b 100644 --- a/internal/pgbackrest/reconcile_test.go +++ b/internal/pgbackrest/reconcile_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -28,9 +17,10 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pki" - "github.com/crunchydata/postgres-operator/internal/util" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -197,7 +187,7 @@ func TestAddConfigToInstancePod(t *testing.T) { assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) // Only database and pgBackRest containers have mounts. - assert.Assert(t, marshalMatches(result.Containers, ` + assert.Assert(t, cmp.MarshalMatches(result.Containers, ` - name: database resources: {} volumeMounts: @@ -229,7 +219,7 @@ func TestAddConfigToInstancePod(t *testing.T) { alwaysExpect(t, out) // Instance configuration files after custom projections. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-config projected: sources: @@ -241,7 +231,19 @@ func TestAddConfigToInstancePod(t *testing.T) { path: pgbackrest_instance.conf - key: config-hash path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest `)) }) @@ -253,8 +255,8 @@ func TestAddConfigToInstancePod(t *testing.T) { AddConfigToInstancePod(cluster, out) alwaysExpect(t, out) - // Instance configuration files but no certificates. - assert.Assert(t, marshalMatches(out.Volumes, ` + // Instance configuration and certificates. + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-config projected: sources: @@ -264,7 +266,19 @@ func TestAddConfigToInstancePod(t *testing.T) { path: pgbackrest_instance.conf - key: config-hash path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest `)) }) @@ -282,7 +296,7 @@ func TestAddConfigToInstancePod(t *testing.T) { alwaysExpect(t, out) // Instance configuration files, server config, and optional client certificates. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-config projected: sources: @@ -305,7 +319,6 @@ func TestAddConfigToInstancePod(t *testing.T) { mode: 384 path: ~postgres-operator/client-tls.key name: hippo-pgbackrest - optional: true `)) }) } @@ -327,7 +340,7 @@ func TestAddConfigToRepoPod(t *testing.T) { assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) // Only pgBackRest containers have mounts. 
- assert.Assert(t, marshalMatches(result.Containers, ` + assert.Assert(t, cmp.MarshalMatches(result.Containers, ` - name: other resources: {} - name: pgbackrest @@ -354,7 +367,7 @@ func TestAddConfigToRepoPod(t *testing.T) { // Repository configuration files, server config, and client certificates // after custom projections. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-config projected: sources: @@ -400,7 +413,7 @@ func TestAddConfigToRestorePod(t *testing.T) { assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) // Only pgBackRest containers have mounts. - assert.Assert(t, marshalMatches(result.Containers, ` + assert.Assert(t, cmp.MarshalMatches(result.Containers, ` - name: other resources: {} - name: pgbackrest @@ -435,7 +448,7 @@ func TestAddConfigToRestorePod(t *testing.T) { // Instance configuration files and optional client certificates // after custom projections. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-config projected: sources: @@ -479,7 +492,7 @@ func TestAddConfigToRestorePod(t *testing.T) { // Instance configuration files and optional client certificates // after custom projections. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-config projected: sources: @@ -503,9 +516,57 @@ func TestAddConfigToRestorePod(t *testing.T) { optional: true `)) }) + + t.Run("CustomFiles", func(t *testing.T) { + custom := corev1.ConfigMapProjection{} + custom.Name = "custom-configmap-files" + + cluster := cluster.DeepCopy() + cluster.Spec.Config.Files = []corev1.VolumeProjection{ + {ConfigMap: &custom}, + } + + sourceCluster := cluster.DeepCopy() + + out := pod.DeepCopy() + AddConfigToRestorePod(cluster, sourceCluster, out) + alwaysExpect(t, out) + + // Instance configuration files and optional configuration files + // after custom projections. + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` +- name: postgres-config + projected: + sources: + - configMap: + name: custom-configmap-files +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_instance.conf + path: pgbackrest_instance.conf + name: source-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: source-pgbackrest + optional: true + `)) + }) } func TestAddServerToInstancePod(t *testing.T) { + t.Parallel() + + ctx := context.Background() cluster := v1beta1.PostgresCluster{} cluster.Name = "hippo" cluster.Default() @@ -523,7 +584,6 @@ func TestAddServerToInstancePod(t *testing.T) { } t.Run("CustomResources", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.TablespaceVolumes+"=false"))) cluster := cluster.DeepCopy() cluster.Spec.Backups.PGBackRest.Sidecars = &v1beta1.PGBackRestSidecars{ PGBackRest: &v1beta1.Sidecar{ @@ -543,14 +603,14 @@ func TestAddServerToInstancePod(t *testing.T) { } out := pod.DeepCopy() - AddServerToInstancePod(cluster, out, "instance-secret-name") + AddServerToInstancePod(ctx, cluster, out, "instance-secret-name") // Only Containers and Volumes fields have changed. 
assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) // The TLS server is added while other containers are untouched. // It has PostgreSQL volumes mounted while other volumes are ignored. - assert.Assert(t, marshalMatches(out.Containers, ` + assert.Assert(t, cmp.MarshalMatches(out.Containers, ` - name: database resources: {} - name: other @@ -575,6 +635,8 @@ func TestAddServerToInstancePod(t *testing.T) { privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbackrest/server name: pgbackrest-server @@ -589,21 +651,21 @@ func TestAddServerToInstancePod(t *testing.T) { - -- - |- monitor() { - exec {fd}<> <(:) + exec {fd}<> <(:||:) until read -r -t 5 -u "${fd}"; do if - [ "${filename}" -nt "/proc/self/fd/${fd}" ] && + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --dereference --format='Loaded configuration dated %y' "${filename}" elif - { [ "${directory}" -nt "/proc/self/fd/${fd}" ] || - [ "${authority}" -nt "/proc/self/fd/${fd}" ] + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] } && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi done @@ -624,6 +686,8 @@ func TestAddServerToInstancePod(t *testing.T) { privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbackrest/server name: pgbackrest-server @@ -632,7 +696,7 @@ func TestAddServerToInstancePod(t *testing.T) { // The server certificate comes from the instance Secret. // Other volumes are untouched. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: other - name: postgres-data - name: postgres-wal @@ -651,7 +715,12 @@ func TestAddServerToInstancePod(t *testing.T) { }) t.Run("AddTablespaces", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.TablespaceVolumes+"=true"))) + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx := feature.NewContext(ctx, gate) + clusterWithTablespaces := cluster.DeepCopy() clusterWithTablespaces.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{ { @@ -664,11 +733,11 @@ func TestAddServerToInstancePod(t *testing.T) { out := pod.DeepCopy() out.Volumes = append(out.Volumes, corev1.Volume{Name: "tablespace-trial"}, corev1.Volume{Name: "tablespace-castle"}) - AddServerToInstancePod(clusterWithTablespaces, out, "instance-secret-name") + AddServerToInstancePod(ctx, clusterWithTablespaces, out, "instance-secret-name") // Only Containers and Volumes fields have changed. 
assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) - assert.Assert(t, marshalMatches(out.Containers, ` + assert.Assert(t, cmp.MarshalMatches(out.Containers, ` - name: database resources: {} - name: other @@ -691,6 +760,8 @@ func TestAddServerToInstancePod(t *testing.T) { privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbackrest/server name: pgbackrest-server @@ -709,21 +780,21 @@ func TestAddServerToInstancePod(t *testing.T) { - -- - |- monitor() { - exec {fd}<> <(:) + exec {fd}<> <(:||:) until read -r -t 5 -u "${fd}"; do if - [ "${filename}" -nt "/proc/self/fd/${fd}" ] && + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --dereference --format='Loaded configuration dated %y' "${filename}" elif - { [ "${directory}" -nt "/proc/self/fd/${fd}" ] || - [ "${authority}" -nt "/proc/self/fd/${fd}" ] + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] } && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi done @@ -742,6 +813,8 @@ func TestAddServerToInstancePod(t *testing.T) { privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbackrest/server name: pgbackrest-server @@ -751,6 +824,9 @@ func TestAddServerToInstancePod(t *testing.T) { } func TestAddServerToRepoPod(t *testing.T) { + t.Parallel() + + ctx := context.Background() cluster := v1beta1.PostgresCluster{} cluster.Name = "hippo" cluster.Default() @@ -781,13 +857,13 @@ func TestAddServerToRepoPod(t *testing.T) { } out := pod.DeepCopy() - AddServerToRepoPod(cluster, out) + AddServerToRepoPod(ctx, cluster, out) // Only Containers and Volumes fields have changed. assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) // The TLS server is added while other containers are untouched. 
- assert.Assert(t, marshalMatches(out.Containers, ` + assert.Assert(t, cmp.MarshalMatches(out.Containers, ` - name: other resources: {} - command: @@ -810,6 +886,8 @@ func TestAddServerToRepoPod(t *testing.T) { privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbackrest/server name: pgbackrest-server @@ -820,21 +898,21 @@ func TestAddServerToRepoPod(t *testing.T) { - -- - |- monitor() { - exec {fd}<> <(:) + exec {fd}<> <(:||:) until read -r -t 5 -u "${fd}"; do if - [ "${filename}" -nt "/proc/self/fd/${fd}" ] && + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --dereference --format='Loaded configuration dated %y' "${filename}" elif - { [ "${directory}" -nt "/proc/self/fd/${fd}" ] || - [ "${authority}" -nt "/proc/self/fd/${fd}" ] + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] } && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi done @@ -855,6 +933,8 @@ func TestAddServerToRepoPod(t *testing.T) { privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbackrest/server name: pgbackrest-server @@ -862,7 +942,7 @@ func TestAddServerToRepoPod(t *testing.T) { `)) // The server certificate comes from the pgBackRest Secret. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-server projected: sources: diff --git a/internal/pgbackrest/restore.md b/internal/pgbackrest/restore.md index c51736e606..8828576921 100644 --- a/internal/pgbackrest/restore.md +++ b/internal/pgbackrest/restore.md @@ -1,16 +1,7 @@ ## Target Action diff --git a/internal/pgbackrest/tls-server.md b/internal/pgbackrest/tls-server.md index 1eeef5a438..b572cc1ea4 100644 --- a/internal/pgbackrest/tls-server.md +++ b/internal/pgbackrest/tls-server.md @@ -1,16 +1,7 @@ # pgBackRest TLS Server @@ -21,8 +12,10 @@ on different pods: - [dedicated repository host](https://pgbackrest.org/user-guide.html#repo-host) - [backup from standby](https://pgbackrest.org/user-guide.html#standby-backup) -When a PostgresCluster is configured to store backups on a PVC, we start a dedicated -repository host to make that PVC available to all PostgreSQL instances in the cluster. +When a PostgresCluster is configured to store backups on a PVC, the dedicated +repository host is used to make that PVC available to all PostgreSQL instances +in the cluster. Regardless of whether the repo host has a defined PVC, it +functions as the server for the pgBackRest clients that run on the Instances. The repository host runs a `pgbackrest` server that is secured through TLS and [certificates][]. When performing backups, it connects to `pgbackrest` servers diff --git a/internal/pgbackrest/util.go b/internal/pgbackrest/util.go index 3e2af44c1d..4fc2266c56 100644 --- a/internal/pgbackrest/util.go +++ b/internal/pgbackrest/util.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -30,9 +19,9 @@ import ( // multi-repository solution implemented within pgBackRest const maxPGBackrestRepos = 4 -// DedicatedRepoHostEnabled determines whether not a pgBackRest dedicated repository host is -// enabled according to the provided PostgresCluster +// RepoHostVolumeDefined determines whether at least one pgBackRest dedicated +// repository host volume has been defined in the PostgresCluster manifest. -func DedicatedRepoHostEnabled(postgresCluster *v1beta1.PostgresCluster) bool { +func RepoHostVolumeDefined(postgresCluster *v1beta1.PostgresCluster) bool { for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { if repo.Volume != nil { return true diff --git a/internal/pgbackrest/util_test.go b/internal/pgbackrest/util_test.go index 861de0e5e1..eb0f4dec29 100644 --- a/internal/pgbackrest/util_test.go +++ b/internal/pgbackrest/util_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbouncer/assertions_test.go b/internal/pgbouncer/assertions_test.go deleted file mode 100644 index b73fa57783..0000000000 --- a/internal/pgbouncer/assertions_test.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pgbouncer - -import ( - "github.com/crunchydata/postgres-operator/internal/testing/cmp" -) - -// marshalMatches converts actual to YAML and compares that to expected. -func marshalMatches(actual interface{}, expected string) cmp.Comparison { - return cmp.MarshalMatches(actual, expected) -} diff --git a/internal/pgbouncer/certificates.go b/internal/pgbouncer/certificates.go index be9c48175d..31f91c503a 100644 --- a/internal/pgbouncer/certificates.go +++ b/internal/pgbouncer/certificates.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer diff --git a/internal/pgbouncer/certificates_test.go b/internal/pgbouncer/certificates_test.go index b77ce4ad23..5955c3de9c 100644 --- a/internal/pgbouncer/certificates_test.go +++ b/internal/pgbouncer/certificates_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -20,6 +9,8 @@ import ( "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" ) func TestBackendAuthority(t *testing.T) { @@ -27,7 +18,7 @@ func TestBackendAuthority(t *testing.T) { projection := &corev1.SecretProjection{ LocalObjectReference: corev1.LocalObjectReference{Name: "some-name"}, } - assert.Assert(t, marshalMatches(backendAuthority(projection), ` + assert.Assert(t, cmp.MarshalMatches(backendAuthority(projection), ` secret: items: - key: ca.crt @@ -40,7 +31,7 @@ secret: {Key: "some-crt-key", Path: "tls.crt"}, {Key: "some-ca-key", Path: "ca.crt"}, } - assert.Assert(t, marshalMatches(backendAuthority(projection), ` + assert.Assert(t, cmp.MarshalMatches(backendAuthority(projection), ` secret: items: - key: some-ca-key @@ -54,7 +45,7 @@ func TestFrontendCertificate(t *testing.T) { secret.Name = "op-secret" t.Run("Generated", func(t *testing.T) { - assert.Assert(t, marshalMatches(frontendCertificate(nil, secret), ` + assert.Assert(t, cmp.MarshalMatches(frontendCertificate(nil, secret), ` secret: items: - key: pgbouncer-frontend.ca-roots @@ -72,7 +63,7 @@ secret: custom.Name = "some-other" // No items; assume Key matches Path. 
- assert.Assert(t, marshalMatches(frontendCertificate(custom, secret), ` + assert.Assert(t, cmp.MarshalMatches(frontendCertificate(custom, secret), ` secret: items: - key: ca.crt @@ -91,7 +82,7 @@ secret: {Key: "some-cert-key", Path: "tls.crt"}, {Key: "some-key-key", Path: "tls.key"}, } - assert.Assert(t, marshalMatches(frontendCertificate(custom, secret), ` + assert.Assert(t, cmp.MarshalMatches(frontendCertificate(custom, secret), ` secret: items: - key: some-ca-key diff --git a/internal/pgbouncer/config.go b/internal/pgbouncer/config.go index 3a3753e6bb..a203144817 100644 --- a/internal/pgbouncer/config.go +++ b/internal/pgbouncer/config.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -250,11 +239,11 @@ func reloadCommand(name string) []string { // mtimes. // - https://unix.stackexchange.com/a/407383 const script = ` -exec {fd}<> <(:) -while read -r -t 5 -u "${fd}" || true; do - if [ "${directory}" -nt "/proc/self/fd/${fd}" ] && pkill -HUP --exact pgbouncer +exec {fd}<> <(:||:) +while read -r -t 5 -u "${fd}" ||:; do + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact pgbouncer then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded configuration dated %y' "${directory}" fi done diff --git a/internal/pgbouncer/config.md b/internal/pgbouncer/config.md index 55b0f83b40..abfec12518 100644 --- a/internal/pgbouncer/config.md +++ b/internal/pgbouncer/config.md @@ -1,16 +1,7 @@ PgBouncer is configured through INI files. It will reload these files when diff --git a/internal/pgbouncer/config_test.go b/internal/pgbouncer/config_test.go index 5aee24f677..7a96da571c 100644 --- a/internal/pgbouncer/config_test.go +++ b/internal/pgbouncer/config_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -27,6 +16,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -150,7 +140,7 @@ func TestPodConfigFiles(t *testing.T) { t.Run("Default", func(t *testing.T) { projections := podConfigFiles(config, configmap, secret) - assert.Assert(t, marshalMatches(projections, ` + assert.Assert(t, cmp.MarshalMatches(projections, ` - configMap: items: - key: pgbouncer-empty @@ -183,7 +173,7 @@ func TestPodConfigFiles(t *testing.T) { } projections := podConfigFiles(config, configmap, secret) - assert.Assert(t, marshalMatches(projections, ` + assert.Assert(t, cmp.MarshalMatches(projections, ` - configMap: items: - key: pgbouncer-empty diff --git a/internal/pgbouncer/postgres.go b/internal/pgbouncer/postgres.go index bc92ab8042..cbc2e29916 100644 --- a/internal/pgbouncer/postgres.go +++ b/internal/pgbouncer/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer diff --git a/internal/pgbouncer/postgres_test.go b/internal/pgbouncer/postgres_test.go index f79292f984..f2ce419753 100644 --- a/internal/pgbouncer/postgres_test.go +++ b/internal/pgbouncer/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer diff --git a/internal/pgbouncer/reconcile.go b/internal/pgbouncer/reconcile.go index 56fc53a669..999d6524a5 100644 --- a/internal/pgbouncer/reconcile.go +++ b/internal/pgbouncer/reconcile.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -23,11 +12,11 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -41,7 +30,7 @@ func ConfigMap( return } - initialize.StringMap(&outConfigMap.Data) + initialize.Map(&outConfigMap.Data) outConfigMap.Data[emptyConfigMapKey] = "" outConfigMap.Data[iniFileConfigMapKey] = clusterINI(inCluster) @@ -61,7 +50,7 @@ func Secret(ctx context.Context, } var err error - initialize.ByteMap(&outSecret.Data) + initialize.Map(&outSecret.Data) // Use the existing password and verifier. Generate both when either is missing. // NOTE(cbandy): We don't have a function to compare a plaintext password @@ -114,6 +103,7 @@ func Secret(ctx context.Context, // Pod populates a PodSpec with the container and volumes needed to run PgBouncer. func Pod( + ctx context.Context, inCluster *v1beta1.PostgresCluster, inConfigMap *corev1.ConfigMap, inPostgreSQLCertificate *corev1.SecretProjection, @@ -191,7 +181,7 @@ func Pod( // If the PGBouncerSidecars feature gate is enabled and custom pgBouncer // sidecars are defined, add the defined container to the Pod. - if util.DefaultMutableFeatureGate.Enabled(util.PGBouncerSidecars) && + if feature.Enabled(ctx, feature.PGBouncerSidecars) && inCluster.Spec.Proxy.PGBouncer.Containers != nil { outPod.Containers = append(outPod.Containers, inCluster.Spec.Proxy.PGBouncer.Containers...) } diff --git a/internal/pgbouncer/reconcile_test.go b/internal/pgbouncer/reconcile_test.go index 832c736e8b..a53de8cf64 100644 --- a/internal/pgbouncer/reconcile_test.go +++ b/internal/pgbouncer/reconcile_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
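Pod now takes a context so feature gates travel with the call instead of through a package-level mutable gate. A compact sketch of the pattern the rewritten tests use; this only compiles inside this repository's module, since internal/feature cannot be imported elsewhere.

package example

import (
	"context"

	"github.com/crunchydata/postgres-operator/internal/feature"
)

func sidecarsEnabled() (bool, error) {
	features := feature.NewGate()

	// Gates default off; enable PGBouncerSidecars for this gate only.
	if err := features.SetFromMap(map[string]bool{
		feature.PGBouncerSidecars: true,
	}); err != nil {
		return false, err
	}

	// Attach the gate to a context and query it wherever the context flows.
	ctx := feature.NewContext(context.Background(), features)
	return feature.Enabled(ctx, feature.PGBouncerSidecars), nil // true
}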
+// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -19,14 +8,15 @@ import ( "context" "testing" - "github.com/google/go-cmp/cmp" + gocmp "github.com/google/go-cmp/cmp" "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" - "github.com/crunchydata/postgres-operator/internal/util" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -103,8 +93,8 @@ func TestSecret(t *testing.T) { func TestPod(t *testing.T) { t.Parallel() - // Initialize the feature gate - assert.NilError(t, util.AddAndSetFeatureGates("")) + features := feature.NewGate() + ctx := feature.NewContext(context.Background(), features) cluster := new(v1beta1.PostgresCluster) configMap := new(corev1.ConfigMap) @@ -112,7 +102,7 @@ func TestPod(t *testing.T) { secret := new(corev1.Secret) pod := new(corev1.PodSpec) - call := func() { Pod(cluster, configMap, primaryCertificate, secret, pod) } + call := func() { Pod(ctx, cluster, configMap, primaryCertificate, secret, pod) } t.Run("Disabled", func(t *testing.T) { before := pod.DeepCopy() @@ -129,7 +119,7 @@ func TestPod(t *testing.T) { call() - assert.Assert(t, marshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(pod, ` containers: - command: - pgbouncer @@ -148,6 +138,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbouncer name: pgbouncer-config @@ -158,11 +150,11 @@ containers: - -- - |- monitor() { - exec {fd}<> <(:) - while read -r -t 5 -u "${fd}" || true; do - if [ "${directory}" -nt "/proc/self/fd/${fd}" ] && pkill -HUP --exact pgbouncer + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact pgbouncer then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded configuration dated %y' "${directory}" fi done @@ -179,6 +171,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbouncer name: pgbouncer-config @@ -235,7 +229,7 @@ volumes: call() - assert.Assert(t, marshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(pod, ` containers: - command: - pgbouncer @@ -258,6 +252,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbouncer name: pgbouncer-config @@ -268,11 +264,11 @@ containers: - -- - |- monitor() { - exec {fd}<> <(:) - while read -r -t 5 -u "${fd}" || true; do - if [ "${directory}" -nt "/proc/self/fd/${fd}" ] && pkill -HUP --exact pgbouncer + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact pgbouncer then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded configuration dated %y' "${directory}" fi done @@ -294,6 +290,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbouncer name: pgbouncer-config @@ -341,7 +339,7 @@ volumes: call() - assert.Assert(t, 
marshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(pod, ` containers: - command: - pgbouncer @@ -364,6 +362,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbouncer name: pgbouncer-config @@ -374,11 +374,11 @@ containers: - -- - |- monitor() { - exec {fd}<> <(:) - while read -r -t 5 -u "${fd}" || true; do - if [ "${directory}" -nt "/proc/self/fd/${fd}" ] && pkill -HUP --exact pgbouncer + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact pgbouncer then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded configuration dated %y' "${directory}" fi done @@ -399,6 +399,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/pgbouncer name: pgbouncer-config @@ -445,7 +447,9 @@ volumes: }) t.Run("SidecarEnabled", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.PGBouncerSidecars+"=true"))) + assert.NilError(t, features.SetFromMap(map[string]bool{ + feature.PGBouncerSidecars: true, + })) call() assert.Equal(t, len(pod.Containers), 3, "expected 3 containers in Pod, got %d", len(pod.Containers)) @@ -487,6 +491,6 @@ func TestPostgreSQL(t *testing.T) { Mandatory: postgresqlHBAs(), }, // postgres.HostBasedAuthentication has unexported fields. Call String() to compare. - cmp.Transformer("", postgres.HostBasedAuthentication.String)) + gocmp.Transformer("", postgres.HostBasedAuthentication.String)) }) } diff --git a/internal/pgmonitor/exporter.go b/internal/pgmonitor/exporter.go index 4c19bee1f7..9d7a1fc3c6 100644 --- a/internal/pgmonitor/exporter.go +++ b/internal/pgmonitor/exporter.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor @@ -43,9 +32,8 @@ const ( // postgres_exporter command flags var ( - ExporterExtendQueryPathFlag = "--extend.query-path=/tmp/queries.yml" - ExporterWebListenAddressFlag = fmt.Sprintf("--web.listen-address=:%d", ExporterPort) - ExporterWebConfigFileFlag = "--web.config.file=/web-config/web-config.yml" + ExporterWebConfigFileFlag = "--web.config.file=/web-config/web-config.yml" + ExporterDeactivateStatBGWriterFlag = "--no-collector.stat_bgwriter" ) // Defaults for certain values used in queries.yml @@ -120,51 +108,76 @@ func GenerateDefaultExporterQueries(ctx context.Context, cluster *v1beta1.Postgr // ExporterStartCommand generates an entrypoint that will create a master queries file and // start the postgres_exporter. It will repeat those steps if it notices a change in // the source queries files. 
-func ExporterStartCommand(commandFlags []string) []string { - script := strings.Join([]string{ +func ExporterStartCommand(builtinCollectors bool, commandFlags ...string) []string { + script := []string{ // Older images do not have the command on the PATH. `PATH="$PATH:$(echo /opt/cpm/bin/postgres_exporter-*)"`, // Set up temporary file to hold postgres_exporter process id `POSTGRES_EXPORTER_PIDFILE=/tmp/postgres_exporter.pid`, + `postgres_exporter_flags=(`, + `'--extend.query-path=/tmp/queries.yml'`, + fmt.Sprintf(`'--web.listen-address=:%d'`, ExporterPort), + `"$@")`, + } + + // Append flags that disable built-in collectors. Find flags in the help + // output and return them with "--[no-]" replaced by "--no-" or "--". + if !builtinCollectors { + script = append(script, + `postgres_exporter_flags+=($(`, + `postgres_exporter --help 2>&1 | while read -r w _; do case "${w}" in`, + `'--[no-]collector.'*) echo "--no-${w#*-]}";;`, + `'--[no-]disable'*'metrics') echo "--${w#*-]}";;`, + `esac; done))`, + ) + } + + script = append(script, // declare function that will combine custom queries file and default // queries and start the postgres_exporter `start_postgres_exporter() {`, - ` cat /conf/* > /tmp/queries.yml`, - ` echo "Starting postgres_exporter with the following flags..."`, - ` echo "$@"`, - ` postgres_exporter "$@" &`, - ` echo $! > $POSTGRES_EXPORTER_PIDFILE`, + ` cat /conf/* > /tmp/queries.yml`, + ` echo "Starting postgres_exporter with the following flags..."`, + ` echo "${postgres_exporter_flags[@]}"`, + ` postgres_exporter "${postgres_exporter_flags[@]}" &`, + ` echo $! > $POSTGRES_EXPORTER_PIDFILE`, `}`, // run function to combine queries files and start postgres_exporter - `start_postgres_exporter "$@"`, + `start_postgres_exporter`, // Create a file descriptor with a no-op process that will not get // cleaned up - `exec {fd}<> <(:)`, + `exec {fd}<> <(:||:)`, // Set up loop. Use read's timeout setting instead of sleep, // which uses up a lot of memory - `while read -r -t 3 -u "${fd}" || true; do`, + `while read -r -t 3 -u "${fd}" ||:; do`, // If either directories' modify time is newer than our file descriptor's, - // something must have changed, so kill the postgres_exporter and rerun - // the function to combine queries files and start postgres_exporter + // something must have changed, so kill the postgres_exporter ` if ([ "/conf" -nt "/proc/self/fd/${fd}" ] || [ "/opt/crunchy/password" -nt "/proc/self/fd/${fd}" ]) \`, - ` && kill $(head -1 ${POSTGRES_EXPORTER_PIDFILE?}) && start_postgres_exporter "$@";`, + ` && kill $(head -1 ${POSTGRES_EXPORTER_PIDFILE?});`, ` then`, - // When something changes we want to get rid of the old file descriptor, get a fresh one // and restart the loop ` echo "Something changed..."`, - ` exec {fd}>&- && exec {fd}<> <(:)`, + ` exec {fd}>&- && exec {fd}<> <(:||:)`, ` stat --format='Latest queries file dated %y' "/conf"`, ` stat --format='Latest password file dated %y' "/opt/crunchy/password"`, ` fi`, + + // If postgres_exporter is not running, restart it + // Use the recorded pid as a proxy for checking if postgres_exporter is running + ` if [[ ! -e /proc/$(head -1 ${POSTGRES_EXPORTER_PIDFILE?}) ]] ; then`, + ` start_postgres_exporter`, + ` fi`, `done`, - }, "\n") + ) - return append([]string{"bash", "-ceu", "--", script, "postgres_exporter_watcher"}, commandFlags...) + return append([]string{ + "bash", "-ceu", "--", strings.Join(script, "\n"), "postgres_exporter_watcher", + }, commandFlags...) 
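The returned slice always carries the inline script at index 3 and "postgres_exporter_watcher" as $0, with caller flags appended after it, which is exactly what the rewritten tests assert. A sketch of a call site, assuming a placeholder flag; like the other internal packages, this compiles only inside this module.

package example

import (
	"fmt"
	"strings"

	"github.com/crunchydata/postgres-operator/internal/pgmonitor"
)

func demo() {
	// false disables the built-in collectors: the script scrapes the
	// "--[no-]collector..." flags out of `postgres_exporter --help`
	// and passes their "--no-" forms.
	command := pgmonitor.ExporterStartCommand(false, "--firstTestFlag")

	fmt.Println(command[:3])                                 // [bash -ceu --]
	fmt.Println(strings.Contains(command[3], "queries.yml")) // true
	fmt.Println(command[4:])                                 // [postgres_exporter_watcher --firstTestFlag]
}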
} diff --git a/internal/pgmonitor/exporter_test.go b/internal/pgmonitor/exporter_test.go index 4f336f5625..5ba14e0993 100644 --- a/internal/pgmonitor/exporter_test.go +++ b/internal/pgmonitor/exporter_test.go @@ -1,34 +1,27 @@ -//go:build envtest -// +build envtest - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor import ( "context" + "os" "strings" "testing" "gotest.tools/v3/assert" + "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestGenerateDefaultExporterQueries(t *testing.T) { + if os.Getenv("QUERIES_CONFIG_DIR") == "" { + t.Skip("QUERIES_CONFIG_DIR must be set") + } + ctx := context.Background() cluster := &v1beta1.PostgresCluster{} @@ -48,15 +41,50 @@ func TestGenerateDefaultExporterQueries(t *testing.T) { } func TestExporterStartCommand(t *testing.T) { - t.Run("OneFlag", func(t *testing.T) { - commandSlice := ExporterStartCommand([]string{"--testFlag"}) - assert.DeepEqual(t, commandSlice[:3], []string{"bash", "-ceu", "--"}) - assert.DeepEqual(t, commandSlice[4:], []string{"postgres_exporter_watcher", "--testFlag"}) - }) + for _, tt := range []struct { + Name string + Collectors bool + Flags []string + Expect func(t *testing.T, command []string, script string) + }{ + { + Name: "NoCollectorsNoFlags", + Expect: func(t *testing.T, _ []string, script string) { + assert.Assert(t, cmp.Contains(script, "--[no-]collector")) + }, + }, + { + Name: "WithCollectorsNoFlags", + Collectors: true, + Expect: func(t *testing.T, _ []string, script string) { + assert.Assert(t, !strings.Contains(script, "collector")) + }, + }, + { + Name: "MultipleFlags", + Flags: []string{"--firstTestFlag", "--secondTestFlag"}, + Expect: func(t *testing.T, command []string, _ string) { + assert.DeepEqual(t, command[4:], []string{"postgres_exporter_watcher", "--firstTestFlag", "--secondTestFlag"}) + }, + }, + } { + t.Run(tt.Name, func(t *testing.T) { + command := ExporterStartCommand(tt.Collectors, tt.Flags...) 
+ assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) + assert.Assert(t, len(command) > 3) + script := command[3] - t.Run("MultipleFlags", func(t *testing.T) { - commandSlice := ExporterStartCommand([]string{"--firstTestFlag", "--secondTestFlag"}) - assert.DeepEqual(t, commandSlice[:3], []string{"bash", "-ceu", "--"}) - assert.DeepEqual(t, commandSlice[4:], []string{"postgres_exporter_watcher", "--firstTestFlag", "--secondTestFlag"}) - }) + assert.Assert(t, cmp.Contains(script, "'--extend.query-path=/tmp/queries.yml'")) + assert.Assert(t, cmp.Contains(script, "'--web.listen-address=:9187'")) + + tt.Expect(t, command, script) + + t.Run("PrettyYAML", func(t *testing.T) { + b, err := yaml.Marshal(script) + assert.NilError(t, err) + assert.Assert(t, strings.HasPrefix(string(b), `|`), + "expected literal block scalar, got:\n%s", b) + }) + }) + } } diff --git a/internal/pgmonitor/postgres.go b/internal/pgmonitor/postgres.go index 3109a2f849..8aed164a18 100644 --- a/internal/pgmonitor/postgres.go +++ b/internal/pgmonitor/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor @@ -50,14 +39,7 @@ func PostgreSQLParameters(inCluster *v1beta1.PostgresCluster, outParameters *pos // Exporter expects that shared_preload_libraries are installed // pg_stat_statements: https://access.crunchydata.com/documentation/pgmonitor/latest/exporter/ // pgnodemx: https://github.com/CrunchyData/pgnodemx - libraries := []string{"pg_stat_statements", "pgnodemx"} - - defined, found := outParameters.Mandatory.Get("shared_preload_libraries") - if found { - libraries = append(libraries, defined) - } - - outParameters.Mandatory.Add("shared_preload_libraries", strings.Join(libraries, ",")) + outParameters.Mandatory.AppendToList("shared_preload_libraries", "pg_stat_statements", "pgnodemx") outParameters.Mandatory.Add("pgnodemx.kdapi_path", postgres.DownwardAPIVolumeMount().MountPath) } diff --git a/internal/pgmonitor/postgres_test.go b/internal/pgmonitor/postgres_test.go index 8cfc0dbcbd..655fa936ae 100644 --- a/internal/pgmonitor/postgres_test.go +++ b/internal/pgmonitor/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor diff --git a/internal/pgmonitor/util.go b/internal/pgmonitor/util.go index b379de38a3..f5606ccd08 100644 --- a/internal/pgmonitor/util.go +++ b/internal/pgmonitor/util.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor diff --git a/internal/pgmonitor/util_test.go b/internal/pgmonitor/util_test.go index 8d95ab07f0..8d16d74bae 100644 --- a/internal/pgmonitor/util_test.go +++ b/internal/pgmonitor/util_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor diff --git a/internal/pki/common.go b/internal/pki/common.go index 8d4f4ca7ab..fbe9421f8b 100644 --- a/internal/pki/common.go +++ b/internal/pki/common.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pki diff --git a/internal/pki/doc.go b/internal/pki/doc.go index 393b8d2584..71f8c0a1bc 100644 --- a/internal/pki/doc.go +++ b/internal/pki/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package pki provides types and functions to support the public key // infrastructure of the Postgres Operator. It enforces a two layer system diff --git a/internal/pki/encoding.go b/internal/pki/encoding.go index 572eb07497..2d2cd851e3 100644 --- a/internal/pki/encoding.go +++ b/internal/pki/encoding.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pki diff --git a/internal/pki/encoding_test.go b/internal/pki/encoding_test.go index 5512873fe1..cdf7c0de5a 100644 --- a/internal/pki/encoding_test.go +++ b/internal/pki/encoding_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pki diff --git a/internal/pki/pki.go b/internal/pki/pki.go index 75c9984d21..7048810654 100644 --- a/internal/pki/pki.go +++ b/internal/pki/pki.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pki diff --git a/internal/pki/pki_test.go b/internal/pki/pki_test.go index 9a05936518..cd13896450 100644 --- a/internal/pki/pki_test.go +++ b/internal/pki/pki_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pki @@ -479,7 +468,7 @@ func basicOpenSSLVerify(t *testing.T, openssl string, root, leaf Certificate) { // - https://mail.python.org/pipermail/cryptography-dev/2016-August/000676.html // TODO(cbandy): When we generate intermediate certificates, verify them - // idependently then bundle them with the root to verify the leaf. + // independently then bundle them with the root to verify the leaf. verify(t, "-CAfile", rootFile, leafFile) verify(t, "-CAfile", rootFile, "-purpose", "sslclient", leafFile) @@ -525,7 +514,7 @@ func strictOpenSSLVerify(t *testing.T, openssl string, root, leaf Certificate) { assert.NilError(t, os.WriteFile(leafFile, leafBytes, 0o600)) // TODO(cbandy): When we generate intermediate certificates, verify them - // idependently then pass them via "-untrusted" to verify the leaf. + // independently then pass them via "-untrusted" to verify the leaf. verify(t, "-trusted", rootFile, leafFile) verify(t, "-trusted", rootFile, "-purpose", "sslclient", leafFile) diff --git a/internal/postgis/postgis.go b/internal/postgis/postgis.go index 8720d09f8f..f54da0dd93 100644 --- a/internal/postgis/postgis.go +++ b/internal/postgis/postgis.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgis diff --git a/internal/postgis/postgis_test.go b/internal/postgis/postgis_test.go index e6d18b7008..5f604abc90 100644 --- a/internal/postgis/postgis_test.go +++ b/internal/postgis/postgis_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgis diff --git a/internal/postgres/assertions_test.go b/internal/postgres/assertions_test.go deleted file mode 100644 index c418641428..0000000000 --- a/internal/postgres/assertions_test.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package postgres - -import ( - "github.com/crunchydata/postgres-operator/internal/testing/cmp" -) - -func marshalMatches(actual interface{}, expected string) cmp.Comparison { - return cmp.MarshalMatches(actual, expected) -} diff --git a/internal/postgres/config.go b/internal/postgres/config.go index 7d3b4d4296..ce1acde3fb 100644 --- a/internal/postgres/config.go +++ b/internal/postgres/config.go @@ -1,28 +1,19 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres import ( + "context" "fmt" "strings" corev1 "k8s.io/api/core/v1" + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -54,7 +45,7 @@ recreate() ( safelink() ( local desired="$1" name="$2" current current=$(realpath "${name}") - if [ "${current}" = "${desired}" ]; then return; fi + if [[ "${current}" == "${desired}" ]]; then return; fi set -x; mv --no-target-directory "${current}" "${desired}" ln --no-dereference --force --symbolic "${desired}" "${name}" ) @@ -101,12 +92,17 @@ func DataDirectory(cluster *v1beta1.PostgresCluster) string { func WALDirectory( cluster *v1beta1.PostgresCluster, instance *v1beta1.PostgresInstanceSetSpec, ) string { - // When no WAL volume is specified, store WAL files on the main data volume. - walStorage := dataMountPath + return fmt.Sprintf("%s/pg%d_wal", WALStorage(instance), cluster.Spec.PostgresVersion) +} + +// WALStorage returns the absolute path to the disk where an instance stores its +// WAL files. Use [WALDirectory] for the exact directory that Postgres uses. 
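Taken together, WALDirectory is always WALStorage plus a version-specific subdirectory. A self-contained sketch of both outcomes; /pgdata and /pgwal stand in for the package's unexported dataMountPath and walMountPath constants, matching the paths referenced elsewhere in this diff.

package main

import "fmt"

const (
	dataMountPath = "/pgdata" // assumed value of the unexported constant
	walMountPath  = "/pgwal"  // assumed value of the unexported constant
)

// walDirectory mirrors the WALStorage/WALDirectory split above.
func walDirectory(postgresVersion int, hasWALVolume bool) string {
	storage := dataMountPath // no WAL volume: WAL lives on the data volume
	if hasWALVolume {
		storage = walMountPath
	}
	return fmt.Sprintf("%s/pg%d_wal", storage, postgresVersion)
}

func main() {
	fmt.Println(walDirectory(16, false)) // /pgdata/pg16_wal
	fmt.Println(walDirectory(16, true))  // /pgwal/pg16_wal
}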
+func WALStorage(instance *v1beta1.PostgresInstanceSetSpec) string { if instance.WALVolumeClaimSpec != nil { - walStorage = walMountPath + return walMountPath } - return fmt.Sprintf("%s/pg%d_wal", walStorage, cluster.Spec.PostgresVersion) + // When no WAL volume is specified, store WAL files on the main data volume. + return dataMountPath } // Environment returns the environment variables required to invoke PostgreSQL @@ -141,6 +137,19 @@ func Environment(cluster *v1beta1.PostgresCluster) []corev1.EnvVar { Name: "KRB5RCACHEDIR", Value: "/tmp", }, + // This allows a custom CA certificate to be mounted for Postgres LDAP + // authentication via spec.config.files. + // - https://wiki.postgresql.org/wiki/LDAP_Authentication_against_AD + // + // When setting the TLS_CACERT for LDAP as an environment variable, 'LDAP' + // must be appended as a prefix. + // - https://www.openldap.org/software/man.cgi?query=ldap.conf + // + // Testing with LDAPTLS_CACERTDIR did not work as expected during testing. + { + Name: "LDAPTLS_CACERT", + Value: configMountPath + "/ldap/ca.crt", + }, } } @@ -171,16 +180,39 @@ func reloadCommand(name string) []string { // mtimes. // - https://unix.stackexchange.com/a/407383 script := fmt.Sprintf(` +# Parameters for curl when managing autogrow annotation. +APISERVER="https://kubernetes.default.svc" +SERVICEACCOUNT="/var/run/secrets/kubernetes.io/serviceaccount" +NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) +TOKEN=$(cat ${SERVICEACCOUNT}/token) +CACERT=${SERVICEACCOUNT}/ca.crt + declare -r directory=%q -exec {fd}<> <(:) -while read -r -t 5 -u "${fd}" || true; do - if [ "${directory}" -nt "/proc/self/fd/${fd}" ] && +exec {fd}<> <(:||:) +while read -r -t 5 -u "${fd}" ||:; do + # Manage replication certificate. + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && install -D --mode=0600 -t %q "${directory}"/{%s,%s,%s} && pkill -HUP --exact --parent=1 postgres then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %%y' "${directory}" fi + + # Manage autogrow annotation. + # Return size in Mebibytes. + size=$(df --human-readable --block-size=M /pgdata | awk 'FNR == 2 {print $2}') + use=$(df --human-readable /pgdata | awk 'FNR == 2 {print $5}') + sizeInt="${size//M/}" + # Use the sed punctuation class, because the shell will not accept the percent sign in an expansion. + useInt=$(echo $use | sed 's/[[:punct:]]//g') + triggerExpansion="$((useInt > 75))" + if [ $triggerExpansion -eq 1 ]; then + newSize="$(((sizeInt / 2)+sizeInt))" + newSizeMi="${newSize}Mi" + d='[{"op": "add", "path": "/metadata/annotations/suggested-pgdata-pvc-size", "value": "'"$newSizeMi"'"}]' + curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "$d" + fi done `, naming.CertMountPath, @@ -200,6 +232,7 @@ done // startupCommand returns an entrypoint that prepares the filesystem for // PostgreSQL. func startupCommand( + ctx context.Context, cluster *v1beta1.PostgresCluster, instance *v1beta1.PostgresInstanceSetSpec, ) []string { version := fmt.Sprint(cluster.Spec.PostgresVersion) @@ -208,7 +241,7 @@ func startupCommand( // If the user requests tablespaces, we want to make sure the directories exist with the // correct owner and permissions. 
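The autogrow branch above patches the pod's own annotations with curl and the mounted service-account credentials. A standalone Go sketch of the same JSON-Patch request, with the annotation key, endpoint, and credential paths taken from the script; the size value is illustrative and error handling is trimmed.

package main

import (
	"bytes"
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
	"strings"
)

func main() {
	const sa = "/var/run/secrets/kubernetes.io/serviceaccount"
	token, _ := os.ReadFile(sa + "/token")
	caCert, _ := os.ReadFile(sa + "/ca.crt")
	namespace, _ := os.ReadFile(sa + "/namespace")
	hostname, _ := os.Hostname()

	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caCert)

	// The same patch the shell sends: suggest a larger pgdata PVC size.
	patch := `[{"op": "add", "path": "/metadata/annotations/suggested-pgdata-pvc-size", "value": "2048Mi"}]`

	url := fmt.Sprintf(
		"https://kubernetes.default.svc/api/v1/namespaces/%s/pods/%s?fieldManager=kubectl-annotate",
		strings.TrimSpace(string(namespace)), hostname,
	)

	req, _ := http.NewRequest(http.MethodPatch, url, bytes.NewBufferString(patch))
	req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(string(token)))
	req.Header.Set("Content-Type", "application/json-patch+json")

	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{RootCAs: pool},
	}}
	if resp, err := client.Do(req); err == nil {
		defer resp.Body.Close()
		fmt.Println(resp.Status)
	}
}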
tablespaceCmd := "" - if util.DefaultMutableFeatureGate.Enabled(util.TablespaceVolumes) { + if feature.Enabled(ctx, feature.TablespaceVolumes) { // This command checks if a dir exists and if not, creates it; // if the dir does exist, then we `recreate` it to make sure the owner is correct; // if the dir exists with the wrong owner and is not writeable, we error. @@ -246,6 +279,18 @@ func startupCommand( } } + pg_rewind_override := "" + if config.FetchKeyCommand(&cluster.Spec) != "" { + // Quoting "EOF" disables parameter substitution during write. + // - https://tldp.org/LDP/abs/html/here-docs.html#EX71C + pg_rewind_override = `cat << "EOF" > /tmp/pg_rewind_tde.sh +#!/bin/sh +pg_rewind -K "$(postgres -C encryption_key_command)" "$@" +EOF +chmod +x /tmp/pg_rewind_tde.sh +` + } + args := []string{version, walDir, naming.PGBackRestPGDataLogPath} script := strings.Join([]string{ `declare -r expected_major_version="$1" pgwal_directory="$2" pgbrLog_directory="$3"`, @@ -267,27 +312,32 @@ func startupCommand( // Log the effective user ID and all the group IDs. `echo Initializing ...`, - `results 'uid' "$(id -u)" 'gid' "$(id -G)"`, + `results 'uid' "$(id -u ||:)" 'gid' "$(id -G ||:)"`, + + // The pgbackrest spool path should be co-located with wal. If a wal volume exists, symlink the spool-path to it. + `if [[ "${pgwal_directory}" == *"pgwal/"* ]] && [[ ! -d "/pgwal/pgbackrest-spool" ]];then rm -rf "/pgdata/pgbackrest-spool" && mkdir -p "/pgwal/pgbackrest-spool" && ln --force --symbolic "/pgwal/pgbackrest-spool" "/pgdata/pgbackrest-spool";fi`, + // When a pgwal volume is removed, the symlink will be broken; force pgbackrest to recreate spool-path. + `if [[ ! -e "/pgdata/pgbackrest-spool" ]];then rm -rf /pgdata/pgbackrest-spool;fi`, // Abort when the PostgreSQL version installed in the image does not // match the cluster spec. - `results 'postgres path' "$(command -v postgres)"`, - `results 'postgres version' "${postgres_version:=$(postgres --version)}"`, + `results 'postgres path' "$(command -v postgres ||:)"`, + `results 'postgres version' "${postgres_version:=$(postgres --version ||:)}"`, `[[ "${postgres_version}" =~ ") ${expected_major_version}"($|[^0-9]) ]] ||`, `halt Expected PostgreSQL version "${expected_major_version}"`, // Abort when the configured data directory is not $PGDATA. // - https://www.postgresql.org/docs/current/runtime-config-file-locations.html `results 'config directory' "${PGDATA:?}"`, - `postgres_data_directory=$([ -d "${PGDATA}" ] && postgres -C data_directory || echo "${PGDATA}")`, + `postgres_data_directory=$([[ -d "${PGDATA}" ]] && postgres -C data_directory || echo "${PGDATA}")`, `results 'data directory' "${postgres_data_directory}"`, `[[ "${postgres_data_directory}" == "${PGDATA}" ]] ||`, `halt Expected matching config and data directories`, // Determine if the data directory has been prepared for bootstrapping the cluster `bootstrap_dir="${postgres_data_directory}_bootstrap"`, - `[ -d "${bootstrap_dir}" ] && results 'bootstrap directory' "${bootstrap_dir}"`, - `[ -d "${bootstrap_dir}" ] && postgres_data_directory="${bootstrap_dir}"`, + `[[ -d "${bootstrap_dir}" ]] && results 'bootstrap directory' "${bootstrap_dir}"`, + `[[ -d "${bootstrap_dir}" ]] && postgres_data_directory="${bootstrap_dir}"`, // PostgreSQL requires its directory to be writable by only itself. 
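The pg_rewind wrapper is only written when config.FetchKeyCommand finds an encryption_key_command in the spec. The TestStartupCommand change later in this diff enables it through Patroni dynamic configuration; a sketch of that spec fragment ("echo test" is the test's placeholder key command), which compiles only inside this module.

package example

import (
	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

func tdeCluster() *v1beta1.PostgresCluster {
	cluster := new(v1beta1.PostgresCluster)
	cluster.Spec.PostgresVersion = 13

	// With encryption_key_command set, startupCommand writes
	// /tmp/pg_rewind_tde.sh so rewinds work on a TDE cluster.
	cluster.Spec.Patroni = &v1beta1.PatroniSpec{
		DynamicConfiguration: map[string]any{
			"postgresql": map[string]any{
				"parameters": map[string]any{
					"encryption_key_command": "echo test",
				},
			},
		},
	}
	return cluster
}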
// Pod "securityContext.fsGroup" sets g+w on directories for *some* @@ -332,9 +382,12 @@ func startupCommand( naming.ReplicationCert, naming.ReplicationPrivateKey, naming.ReplicationCACert), + // Add the pg_rewind wrapper script, if TDE is enabled. + pg_rewind_override, + tablespaceCmd, // When the data directory is empty, there's nothing more to do. - `[ -f "${postgres_data_directory}/PG_VERSION" ] || exit 0`, + `[[ -f "${postgres_data_directory}/PG_VERSION" ]] || exit 0`, // Abort when the data directory is not empty and its version does not // match the cluster spec. @@ -358,7 +411,7 @@ func startupCommand( // - https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/bin/initdb/initdb.c;hb=REL_13_0#l2718 // - https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/bin/pg_basebackup/pg_basebackup.c;hb=REL_13_0#l2621 `safelink "${pgwal_directory}" "${postgres_data_directory}/pg_wal"`, - `results 'wal directory' "$(realpath "${postgres_data_directory}/pg_wal")"`, + `results 'wal directory' "$(realpath "${postgres_data_directory}/pg_wal" ||:)"`, // Early versions of PGO create replicas with a recovery signal file. // Patroni also creates a standby signal file before starting Postgres, diff --git a/internal/postgres/config_test.go b/internal/postgres/config_test.go index ec84a19f99..cd4c92d185 100644 --- a/internal/postgres/config_test.go +++ b/internal/postgres/config_test.go @@ -1,22 +1,12 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres import ( "bytes" + "context" "errors" "fmt" "os" @@ -31,7 +21,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -466,13 +455,14 @@ func TestBashSafeLink(t *testing.T) { func TestStartupCommand(t *testing.T) { shellcheck := require.ShellCheck(t) + t.Parallel() - assert.NilError(t, util.AddAndSetFeatureGates(string(util.TablespaceVolumes+"=false"))) cluster := new(v1beta1.PostgresCluster) cluster.Spec.PostgresVersion = 13 instance := new(v1beta1.PostgresInstanceSetSpec) - command := startupCommand(cluster, instance) + ctx := context.Background() + command := startupCommand(ctx, cluster, instance) // Expect a bash command with an inline script. 
assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) @@ -495,4 +485,24 @@ func TestStartupCommand(t *testing.T) { assert.Assert(t, strings.HasPrefix(string(b), `|`), "expected literal block scalar, got:\n%s", b) }) + + t.Run("EnableTDE", func(t *testing.T) { + + cluster.Spec.Patroni = &v1beta1.PatroniSpec{ + DynamicConfiguration: map[string]any{ + "postgresql": map[string]any{ + "parameters": map[string]any{ + "encryption_key_command": "echo test", + }, + }, + }, + } + command := startupCommand(ctx, cluster, instance) + assert.Assert(t, len(command) > 3) + assert.Assert(t, strings.Contains(command[3], `cat << "EOF" > /tmp/pg_rewind_tde.sh +#!/bin/sh +pg_rewind -K "$(postgres -C encryption_key_command)" "$@" +EOF +chmod +x /tmp/pg_rewind_tde.sh`)) + }) } diff --git a/internal/postgres/databases.go b/internal/postgres/databases.go index 89589a3379..0d70170527 100644 --- a/internal/postgres/databases.go +++ b/internal/postgres/databases.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/databases_test.go b/internal/postgres/databases_test.go index d0467a18ef..e025e86788 100644 --- a/internal/postgres/databases_test.go +++ b/internal/postgres/databases_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/doc.go b/internal/postgres/doc.go index 9065fdfe6f..bd616b5916 100644 --- a/internal/postgres/doc.go +++ b/internal/postgres/doc.go @@ -1,19 +1,8 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + // Package postgres is a collection of resources that interact with PostgreSQL // or provide functionality that makes it easier for other resources to interact // with PostgreSQL. package postgres - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ diff --git a/internal/postgres/exec.go b/internal/postgres/exec.go index d697c1c8c9..a846a8aa57 100644 --- a/internal/postgres/exec.go +++ b/internal/postgres/exec.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/exec_test.go b/internal/postgres/exec_test.go index 8c75be9a2e..df9b862577 100644 --- a/internal/postgres/exec_test.go +++ b/internal/postgres/exec_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/hba.go b/internal/postgres/hba.go index 37b0eee267..d9b5ce2680 100644 --- a/internal/postgres/hba.go +++ b/internal/postgres/hba.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/hba_test.go b/internal/postgres/hba_test.go index e692ed8953..9744479fdd 100644 --- a/internal/postgres/hba_test.go +++ b/internal/postgres/hba_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/huge_pages.go b/internal/postgres/huge_pages.go index 7bb470fb06..ee13c0d11b 100644 --- a/internal/postgres/huge_pages.go +++ b/internal/postgres/huge_pages.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/huge_pages_test.go b/internal/postgres/huge_pages_test.go index ca4f87069d..58a6a6aa57 100644 --- a/internal/postgres/huge_pages_test.go +++ b/internal/postgres/huge_pages_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/iana.go b/internal/postgres/iana.go index ece1c05ef3..4392b549f1 100644 --- a/internal/postgres/iana.go +++ b/internal/postgres/iana.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/parameters.go b/internal/postgres/parameters.go index 0715d4549e..434d9fd1dd 100644 --- a/internal/postgres/parameters.go +++ b/internal/postgres/parameters.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -96,6 +85,22 @@ func (ps *ParameterSet) Add(name, value string) { ps.values[ps.normalize(name)] = value } +// AppendToList adds each value to the right-hand side of parameter name +// as a comma-separated list without quoting. +func (ps *ParameterSet) AppendToList(name string, value ...string) { + result := ps.Value(name) + + if len(value) > 0 { + if len(result) > 0 { + result += "," + strings.Join(value, ",") + } else { + result = strings.Join(value, ",") + } + } + + ps.Add(name, result) +} + // Get returns the value of parameter name and whether or not it was present in ps. func (ps ParameterSet) Get(name string) (string, bool) { value, ok := ps.values[ps.normalize(name)] diff --git a/internal/postgres/parameters_test.go b/internal/postgres/parameters_test.go index be7c56dcb1..c6228d7958 100644 --- a/internal/postgres/parameters_test.go +++ b/internal/postgres/parameters_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
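AppendToList is what lets pgmonitor add its shared libraries without clobbering values already set, replacing the Get/Join sequence removed from internal/pgmonitor/postgres.go above. A short usage sketch; the pre-existing "citus" value is illustrative, and this compiles only inside this module.

package example

import "github.com/crunchydata/postgres-operator/internal/postgres"

func demo() string {
	ps := postgres.NewParameterSet()
	ps.Add("shared_preload_libraries", "citus") // a user-defined value

	// Values are appended to the comma-separated list without quoting.
	ps.AppendToList("shared_preload_libraries", "pg_stat_statements", "pgnodemx")

	return ps.Value("shared_preload_libraries") // "citus,pg_stat_statements,pgnodemx"
}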
+// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -68,3 +57,26 @@ func TestParameterSet(t *testing.T) { ps2.Add("x", "n") assert.Assert(t, ps2.Value("x") != ps.Value("x")) } + +func TestParameterSetAppendToList(t *testing.T) { + ps := NewParameterSet() + + ps.AppendToList("empty") + assert.Assert(t, ps.Has("empty")) + assert.Equal(t, ps.Value("empty"), "") + + ps.AppendToList("empty") + assert.Equal(t, ps.Value("empty"), "", "expected no change") + + ps.AppendToList("full", "a") + assert.Equal(t, ps.Value("full"), "a") + + ps.AppendToList("full", "b") + assert.Equal(t, ps.Value("full"), "a,b") + + ps.AppendToList("full") + assert.Equal(t, ps.Value("full"), "a,b", "expected no change") + + ps.AppendToList("full", "a", "cd", `"e"`) + assert.Equal(t, ps.Value("full"), `a,b,a,cd,"e"`) +} diff --git a/internal/postgres/password/doc.go b/internal/postgres/password/doc.go index 6646bfa4a6..eef7ed7db2 100644 --- a/internal/postgres/password/doc.go +++ b/internal/postgres/password/doc.go @@ -1,19 +1,7 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + // package password lets one create the appropriate password hashes and // verifiers that are used for adding the information into PostgreSQL - package password - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ diff --git a/internal/postgres/password/md5.go b/internal/postgres/password/md5.go index 7fc9c8a7be..884dfb655e 100644 --- a/internal/postgres/password/md5.go +++ b/internal/postgres/password/md5.go @@ -1,19 +1,8 @@ -package password - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package password import ( diff --git a/internal/postgres/password/md5_test.go b/internal/postgres/password/md5_test.go index 7f6a830627..80cb7742d6 100644 --- a/internal/postgres/password/md5_test.go +++ b/internal/postgres/password/md5_test.go @@ -1,19 +1,8 @@ -package password - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
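A note on the AppendToList helper and its tests above: values are joined with commas and never quoted, so a caller that needs quoting (as in the `"e"` test case) must quote before appending. A minimal standalone sketch of the same semantics, using a plain map in place of the unexported ParameterSet type; the parameter name is illustrative:

package main

import (
	"fmt"
	"strings"
)

// appendToList mirrors ParameterSet.AppendToList from the diff above:
// each value is joined onto the existing right-hand side with commas,
// and nothing is quoted on the caller's behalf. Appending zero values
// still records the key, matching the "empty" test case.
func appendToList(values map[string]string, name string, value ...string) {
	result := values[name]
	if len(value) > 0 {
		if len(result) > 0 {
			result += "," + strings.Join(value, ",")
		} else {
			result = strings.Join(value, ",")
		}
	}
	values[name] = result
}

func main() {
	values := map[string]string{}
	appendToList(values, "shared_preload_libraries", "pgaudit")
	appendToList(values, "shared_preload_libraries", "pg_stat_statements")
	fmt.Println(values["shared_preload_libraries"]) // pgaudit,pg_stat_statements
}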
+// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package password import ( "fmt" diff --git a/internal/postgres/password/password.go b/internal/postgres/password/password.go index 94c81dc990..337282cc74 100644 --- a/internal/postgres/password/password.go +++ b/internal/postgres/password/password.go @@ -1,19 +1,8 @@ -package password - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package password import ( "errors" diff --git a/internal/postgres/password/password_test.go b/internal/postgres/password/password_test.go index 588c3ffbe3..3401dec4ac 100644 --- a/internal/postgres/password/password_test.go +++ b/internal/postgres/password/password_test.go @@ -1,19 +1,8 @@ -package password - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package password import ( "errors" diff --git a/internal/postgres/password/scram.go b/internal/postgres/password/scram.go index bb8dfac4a6..8264cd87a0 100644 --- a/internal/postgres/password/scram.go +++ b/internal/postgres/password/scram.go @@ -1,19 +1,8 @@ -package password - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
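For context on the md5.go file relicensed above (its logic is unchanged by this diff): PostgreSQL's legacy md5 verifier is the well-known format "md5" followed by the hex MD5 digest of the password concatenated with the username. A sketch of that format:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// md5Verifier sketches PostgreSQL's historical md5 password format:
// "md5" + hex(md5(password || username)). No salt is involved, which
// is why this scheme is deprecated in favor of SCRAM.
func md5Verifier(username, password string) string {
	sum := md5.Sum([]byte(password + username))
	return "md5" + hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(md5Verifier("app", "hunter2"))
}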
-*/ +package password import ( "crypto/hmac" @@ -37,7 +26,7 @@ import ( // // where: // DIGEST = SCRAM-SHA-256 (only value for now in PostgreSQL) -// ITERATIONS = the number of iteratiosn to use for PBKDF2 +// ITERATIONS = the number of iterations to use for PBKDF2 // SALT = the salt used as part of the PBKDF2, stored in base64 // STORED_KEY = the hash of the client key, stored in base64 // SERVER_KEY = the hash of the server key @@ -179,7 +168,7 @@ func (s *SCRAMPassword) saslPrep() string { // perform SASLprep on the password. if the SASLprep fails or returns an // empty string, return the original password - // Otherwise return the clean pasword + // Otherwise return the clean password cleanedPassword, err := stringprep.SASLprep.Prepare(s.password) if cleanedPassword == "" || err != nil { return s.password diff --git a/internal/postgres/password/scram_test.go b/internal/postgres/password/scram_test.go index c1883d6bbe..0552e519b7 100644 --- a/internal/postgres/password/scram_test.go +++ b/internal/postgres/password/scram_test.go @@ -1,19 +1,8 @@ -package password - -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package password import ( "bytes" diff --git a/internal/postgres/reconcile.go b/internal/postgres/reconcile.go index 2b250af07e..344f91dd9f 100644 --- a/internal/postgres/reconcile.go +++ b/internal/postgres/reconcile.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
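The corrected comment above spells out the verifier layout, SCRAM-SHA-256$ITERATIONS:SALT$STORED_KEY:SERVER_KEY. As a sketch of how those pieces relate under RFC 5802/7677, here is one way to assemble such a verifier, assuming golang.org/x/crypto/pbkdf2 for key derivation; salt generation and SASLprep are omitted:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

// buildSCRAMVerifier derives the stored and server keys per RFC 5802:
// SaltedPassword = PBKDF2(password, salt, iterations);
// StoredKey = SHA-256(HMAC(SaltedPassword, "Client Key"));
// ServerKey = HMAC(SaltedPassword, "Server Key").
func buildSCRAMVerifier(password string, salt []byte, iterations int) string {
	salted := pbkdf2.Key([]byte(password), salt, iterations, 32, sha256.New)

	clientKey := hmac.New(sha256.New, salted)
	clientKey.Write([]byte("Client Key"))
	storedKey := sha256.Sum256(clientKey.Sum(nil))

	serverKey := hmac.New(sha256.New, salted)
	serverKey.Write([]byte("Server Key"))

	b64 := base64.StdEncoding.EncodeToString
	return fmt.Sprintf("SCRAM-SHA-256$%d:%s$%s:%s",
		iterations, b64(salt), b64(storedKey[:]), b64(serverKey.Sum(nil)))
}

func main() {
	fmt.Println(buildSCRAMVerifier("hunter2", []byte("0123456789abcdef"), 4096))
}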
+// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -22,9 +11,9 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -195,7 +184,7 @@ func InstancePod(ctx context.Context, ImagePullPolicy: container.ImagePullPolicy, SecurityContext: initialize.RestrictedSecurityContext(), - VolumeMounts: []corev1.VolumeMount{certVolumeMount}, + VolumeMounts: []corev1.VolumeMount{certVolumeMount, dataVolumeMount}, } if inInstanceSpec.Sidecars != nil && @@ -207,7 +196,7 @@ func InstancePod(ctx context.Context, startup := corev1.Container{ Name: naming.ContainerPostgresStartup, - Command: startupCommand(inCluster, inInstanceSpec), + Command: startupCommand(ctx, inCluster, inInstanceSpec), Env: Environment(inCluster), Image: container.Image, @@ -276,7 +265,7 @@ func InstancePod(ctx context.Context, // If the InstanceSidecars feature gate is enabled and instance sidecars are // defined, add the defined container to the Pod. - if util.DefaultMutableFeatureGate.Enabled(util.InstanceSidecars) && + if feature.Enabled(ctx, feature.InstanceSidecars) && inInstanceSpec.Containers != nil { outInstancePod.Containers = append(outInstancePod.Containers, inInstanceSpec.Containers...) } @@ -294,8 +283,7 @@ func PodSecurityContext(cluster *v1beta1.PostgresCluster) *corev1.PodSecurityCon // - https://docs.k8s.io/concepts/security/pod-security-standards/ for i := range cluster.Spec.SupplementalGroups { if gid := cluster.Spec.SupplementalGroups[i]; gid > 0 { - podSecurityContext.SupplementalGroups = - append(podSecurityContext.SupplementalGroups, gid) + podSecurityContext.SupplementalGroups = append(podSecurityContext.SupplementalGroups, gid) } } diff --git a/internal/postgres/reconcile_test.go b/internal/postgres/reconcile_test.go index d93000af64..138b5c7b3e 100644 --- a/internal/postgres/reconcile_test.go +++ b/internal/postgres/reconcile_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
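One behavior worth noting in the reflowed PodSecurityContext assignment above: only positive GIDs from spec.supplementalGroups survive, per the linked Pod Security Standards. The filter in isolation:

package main

import "fmt"

// filterGroups sketches the SupplementalGroups loop above: zero and
// negative GIDs are dropped, since the restricted Pod Security
// Standards disallow them.
func filterGroups(spec []int64) []int64 {
	var out []int64
	for _, gid := range spec {
		if gid > 0 {
			out = append(out, gid)
		}
	}
	return out
}

func main() {
	fmt.Println(filterGroups([]int64{0, -1, 999, 65000})) // [999 65000]
}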
+// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -23,9 +12,10 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/util" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -70,11 +60,9 @@ func TestTablespaceVolumeMount(t *testing.T) { } func TestInstancePod(t *testing.T) { - ctx := context.Background() - - // Initialize the feature gate - assert.NilError(t, util.AddAndSetFeatureGates("")) + t.Parallel() + ctx := context.Background() cluster := new(v1beta1.PostgresCluster) cluster.Default() cluster.Spec.ImagePullPolicy = corev1.PullAlways @@ -130,7 +118,7 @@ func TestInstancePod(t *testing.T) { InstancePod(ctx, cluster, instance, serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) - assert.Assert(t, marshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(pod, ` containers: - env: - name: PGDATA @@ -143,6 +131,8 @@ containers: value: /etc/postgres/krb5.conf - name: KRB5RCACHEDIR value: /tmp + - name: LDAPTLS_CACERT + value: /etc/postgres/ldap/ca.crt imagePullPolicy: Always name: database ports: @@ -160,6 +150,8 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /pgconf/tls name: cert-volume @@ -175,16 +167,39 @@ containers: - -- - |- monitor() { + # Parameters for curl when managing autogrow annotation. + APISERVER="https://kubernetes.default.svc" + SERVICEACCOUNT="/var/run/secrets/kubernetes.io/serviceaccount" + NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) + TOKEN=$(cat ${SERVICEACCOUNT}/token) + CACERT=${SERVICEACCOUNT}/ca.crt + declare -r directory="/pgconf/tls" - exec {fd}<> <(:) - while read -r -t 5 -u "${fd}" || true; do - if [ "${directory}" -nt "/proc/self/fd/${fd}" ] && + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + # Manage replication certificate. + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && install -D --mode=0600 -t "/tmp/replication" "${directory}"/{replication/tls.crt,replication/tls.key,replication/ca.crt} && pkill -HUP --exact --parent=1 postgres then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi + + # Manage autogrow annotation. + # Return size in Mebibytes. + size=$(df --human-readable --block-size=M /pgdata | awk 'FNR == 2 {print $2}') + use=$(df --human-readable /pgdata | awk 'FNR == 2 {print $5}') + sizeInt="${size//M/}" + # Use the sed punctuation class, because the shell will not accept the percent sign in an expansion. 
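The autogrow branch added to the monitor script (which continues below) reduces to one sizing rule: once /pgdata utilization passes 75%, suggest one-and-a-half times the current size through the suggested-pgdata-pvc-size annotation. The same arithmetic restated in Go, with hypothetical inputs:

package main

import "fmt"

// suggestPVCSize restates the sizing rule from the monitor script above:
// grow by half of the current size once the volume is more than 75% used.
// Sizes are in mebibytes, matching the script's `df --block-size=M`.
func suggestPVCSize(sizeMi, usedPercent int) (string, bool) {
	if usedPercent <= 75 {
		return "", false
	}
	return fmt.Sprintf("%dMi", sizeMi/2+sizeMi), true
}

func main() {
	if suggestion, ok := suggestPVCSize(1000, 80); ok {
		fmt.Println("suggested-pgdata-pvc-size =", suggestion) // 1500Mi
	}
}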
+ useInt=$(echo $use | sed 's/[[:punct:]]//g') + triggerExpansion="$((useInt > 75))" + if [ $triggerExpansion -eq 1 ]; then + newSize="$(((sizeInt / 2)+sizeInt))" + newSizeMi="${newSize}Mi" + d='[{"op": "add", "path": "/metadata/annotations/suggested-pgdata-pvc-size", "value": "'"$newSizeMi"'"}]' + curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "$d" + fi done }; export -f monitor; exec -a "$0" bash -ceu monitor - replication-cert-copy @@ -201,10 +216,14 @@ containers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /pgconf/tls name: cert-volume readOnly: true + - mountPath: /pgdata + name: postgres-data initContainers: - command: - bash @@ -222,24 +241,26 @@ initContainers: safelink() ( local desired="$1" name="$2" current current=$(realpath "${name}") - if [ "${current}" = "${desired}" ]; then return; fi + if [[ "${current}" == "${desired}" ]]; then return; fi set -x; mv --no-target-directory "${current}" "${desired}" ln --no-dereference --force --symbolic "${desired}" "${name}" ) echo Initializing ... - results 'uid' "$(id -u)" 'gid' "$(id -G)" - results 'postgres path' "$(command -v postgres)" - results 'postgres version' "${postgres_version:=$(postgres --version)}" + results 'uid' "$(id -u ||:)" 'gid' "$(id -G ||:)" + if [[ "${pgwal_directory}" == *"pgwal/"* ]] && [[ ! -d "/pgwal/pgbackrest-spool" ]];then rm -rf "/pgdata/pgbackrest-spool" && mkdir -p "/pgwal/pgbackrest-spool" && ln --force --symbolic "/pgwal/pgbackrest-spool" "/pgdata/pgbackrest-spool";fi + if [[ ! -e "/pgdata/pgbackrest-spool" ]];then rm -rf /pgdata/pgbackrest-spool;fi + results 'postgres path' "$(command -v postgres ||:)" + results 'postgres version' "${postgres_version:=$(postgres --version ||:)}" [[ "${postgres_version}" =~ ") ${expected_major_version}"($|[^0-9]) ]] || halt Expected PostgreSQL version "${expected_major_version}" results 'config directory' "${PGDATA:?}" - postgres_data_directory=$([ -d "${PGDATA}" ] && postgres -C data_directory || echo "${PGDATA}") + postgres_data_directory=$([[ -d "${PGDATA}" ]] && postgres -C data_directory || echo "${PGDATA}") results 'data directory' "${postgres_data_directory}" [[ "${postgres_data_directory}" == "${PGDATA}" ]] || halt Expected matching config and data directories bootstrap_dir="${postgres_data_directory}_bootstrap" - [ -d "${bootstrap_dir}" ] && results 'bootstrap directory' "${bootstrap_dir}" - [ -d "${bootstrap_dir}" ] && postgres_data_directory="${bootstrap_dir}" + [[ -d "${bootstrap_dir}" ]] && results 'bootstrap directory' "${bootstrap_dir}" + [[ -d "${bootstrap_dir}" ]] && postgres_data_directory="${bootstrap_dir}" if [[ ! 
-e "${postgres_data_directory}" || -O "${postgres_data_directory}" ]]; then install --directory --mode=0700 "${postgres_data_directory}" elif [[ -w "${postgres_data_directory}" && -g "${postgres_data_directory}" ]]; then @@ -251,14 +272,15 @@ initContainers: halt "$(permissions "${pgbrLog_directory}" ||:)" install -D --mode=0600 -t "/tmp/replication" "/pgconf/tls/replication"/{tls.crt,tls.key,ca.crt} - [ -f "${postgres_data_directory}/PG_VERSION" ] || exit 0 + + [[ -f "${postgres_data_directory}/PG_VERSION" ]] || exit 0 results 'data version' "${postgres_data_version:=$(< "${postgres_data_directory}/PG_VERSION")}" [[ "${postgres_data_version}" == "${expected_major_version}" ]] || halt Expected PostgreSQL data version "${expected_major_version}" [[ ! -f "${postgres_data_directory}/postgresql.conf" ]] && touch "${postgres_data_directory}/postgresql.conf" safelink "${pgwal_directory}" "${postgres_data_directory}/pg_wal" - results 'wal directory' "$(realpath "${postgres_data_directory}/pg_wal")" + results 'wal directory' "$(realpath "${postgres_data_directory}/pg_wal" ||:)" rm -f "${postgres_data_directory}/recovery.signal" - startup - "11" @@ -275,6 +297,8 @@ initContainers: value: /etc/postgres/krb5.conf - name: KRB5RCACHEDIR value: /tmp + - name: LDAPTLS_CACERT + value: /etc/postgres/ldap/ca.crt imagePullPolicy: Always name: postgres-startup resources: @@ -288,6 +312,8 @@ initContainers: privileged: false readOnlyRootFilesystem: true runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /pgconf/tls name: cert-volume @@ -363,7 +389,7 @@ volumes: assert.Assert(t, len(pod.InitContainers) > 0) // Container has all mountPaths, including downwardAPI - assert.Assert(t, marshalMatches(pod.Containers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -376,7 +402,7 @@ volumes: name: postgres-wal`), "expected WAL and downwardAPI mounts in %q container", pod.Containers[0].Name) // InitContainer has all mountPaths, except downwardAPI - assert.Assert(t, marshalMatches(pod.InitContainers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -385,7 +411,7 @@ volumes: - mountPath: /pgwal name: postgres-wal`), "expected WAL mount, no downwardAPI mount in %q container", pod.InitContainers[0].Name) - assert.Assert(t, marshalMatches(pod.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(pod.Volumes, ` - name: cert-volume projected: defaultMode: 384 @@ -471,7 +497,7 @@ volumes: // Container has all mountPaths, including downwardAPI, // and the postgres-config - assert.Assert(t, marshalMatches(pod.Containers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -485,7 +511,7 @@ volumes: readOnly: true`), "expected WAL and downwardAPI mounts in %q container", pod.Containers[0].Name) // InitContainer has all mountPaths, except downwardAPI and additionalConfig - assert.Assert(t, marshalMatches(pod.InitContainers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -507,7 +533,12 @@ volumes: }) t.Run("SidecarEnabled", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.InstanceSidecars+"=true"))) + gate := feature.NewGate() + assert.NilError(t, 
gate.SetFromMap(map[string]bool{ + feature.InstanceSidecars: true, + })) + ctx := feature.NewContext(ctx, gate) + InstancePod(ctx, cluster, sidecarInstance, serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) @@ -525,7 +556,6 @@ volumes: }) t.Run("WithTablespaces", func(t *testing.T) { - clusterWithTablespaces := cluster.DeepCopy() clusterWithTablespaces.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{ { @@ -549,7 +579,7 @@ volumes: InstancePod(ctx, cluster, instance, serverSecretProjection, clientSecretProjection, dataVolume, nil, tablespaceVolumes, pod) - assert.Assert(t, marshalMatches(pod.Containers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -564,7 +594,7 @@ volumes: name: tablespace-trial`), "expected tablespace mount(s) in %q container", pod.Containers[0].Name) // InitContainer has all mountPaths, except downwardAPI and additionalConfig - assert.Assert(t, marshalMatches(pod.InitContainers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -590,7 +620,7 @@ volumes: assert.Assert(t, len(pod.Containers) > 0) assert.Assert(t, len(pod.InitContainers) > 0) - assert.Assert(t, marshalMatches(pod.Containers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -602,7 +632,7 @@ volumes: - mountPath: /pgwal name: postgres-wal`), "expected WAL and downwardAPI mounts in %q container", pod.Containers[0].Name) - assert.Assert(t, marshalMatches(pod.InitContainers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -611,7 +641,7 @@ volumes: - mountPath: /pgwal name: postgres-wal`), "expected WAL mount, no downwardAPI mount in %q container", pod.InitContainers[0].Name) - assert.Assert(t, marshalMatches(pod.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(pod.Volumes, ` - name: cert-volume projected: defaultMode: 384 @@ -681,23 +711,23 @@ func TestPodSecurityContext(t *testing.T) { cluster := new(v1beta1.PostgresCluster) cluster.Default() - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` fsGroup: 26 fsGroupChangePolicy: OnRootMismatch `)) cluster.Spec.OpenShift = initialize.Bool(true) - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` fsGroupChangePolicy: OnRootMismatch `)) cluster.Spec.SupplementalGroups = []int64{} - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` fsGroupChangePolicy: OnRootMismatch `)) cluster.Spec.SupplementalGroups = []int64{999, 65000} - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` fsGroupChangePolicy: OnRootMismatch supplementalGroups: - 999 @@ -705,7 +735,7 @@ supplementalGroups: `)) *cluster.Spec.OpenShift = false - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` fsGroup: 26 fsGroupChangePolicy: OnRootMismatch supplementalGroups: diff --git a/internal/postgres/users.go b/internal/postgres/users.go index ccc407507a..be8785a4e5 
100644 --- a/internal/postgres/users.go +++ b/internal/postgres/users.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -19,17 +8,57 @@ import ( "bytes" "context" "encoding/json" + "strings" + + pg_query "github.com/pganalyze/pg_query_go/v5" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +var RESERVED_SCHEMA_NAMES = map[string]bool{ + "public": true, // This is here for documentation; Postgres will reject a role named `public` as reserved + "pgbouncer": true, + "monitor": true, +} + +func sanitizeAlterRoleOptions(options string) string { + const AlterRolePrefix = `ALTER ROLE "any" WITH ` + + // Parse the options and discard them completely when incoherent. + parsed, err := pg_query.Parse(AlterRolePrefix + options) + if err != nil || len(parsed.GetStmts()) != 1 { + return "" + } + + // Rebuild the options list without invalid options. TODO(go1.21) TODO(slices) + orig := parsed.GetStmts()[0].GetStmt().GetAlterRoleStmt().GetOptions() + next := make([]*pg_query.Node, 0, len(orig)) + for i, option := range orig { + if strings.EqualFold(option.GetDefElem().GetDefname(), "password") { + continue + } + next = append(next, orig[i]) + } + if len(next) > 0 { + parsed.GetStmts()[0].GetStmt().GetAlterRoleStmt().Options = next + } else { + return "" + } + + // Turn the modified statement back into SQL and remove the ALTER ROLE portion. + sql, _ := pg_query.Deparse(parsed) + return strings.TrimPrefix(sql, AlterRolePrefix) +} + // WriteUsersInPostgreSQL calls exec to create users that do not exist in // PostgreSQL. Once they exist, it updates their options and passwords and // grants them access to their specified databases. The databases must already // exist. func WriteUsersInPostgreSQL( - ctx context.Context, exec Executor, + ctx context.Context, cluster *v1beta1.PostgresCluster, exec Executor, users []v1beta1.PostgresUserSpec, verifiers map[string]string, ) error { log := logging.FromContext(ctx) @@ -56,7 +85,7 @@ CREATE TEMPORARY TABLE input (id serial, data json); spec := users[i] databases := spec.Databases - options := spec.Options + options := sanitizeAlterRoleOptions(spec.Options) // The "postgres" user must always be a superuser that can login to // the "postgres" database. @@ -130,5 +159,83 @@ SELECT pg_catalog.format('GRANT ALL PRIVILEGES ON DATABASE %I TO %I', log.V(1).Info("wrote PostgreSQL users", "stdout", stdout, "stderr", stderr) + // The operator will attempt to write schemas for the users in the spec if + // * the feature gate is enabled and + // * the cluster is annotated. 
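sanitizeAlterRoleOptions above relies on pg_query_go for safety: anything that fails to parse as ALTER ROLE options is discarded wholesale, and PASSWORD options are filtered out before deparsing. A runnable condensation of that flow:

package main

import (
	"fmt"
	"strings"

	pg_query "github.com/pganalyze/pg_query_go/v5"
)

// sanitize parses user-supplied options through the PostgreSQL grammar,
// drops any PASSWORD option, and deparses what remains, mirroring
// sanitizeAlterRoleOptions in the diff above.
func sanitize(options string) string {
	const prefix = `ALTER ROLE "any" WITH `

	parsed, err := pg_query.Parse(prefix + options)
	if err != nil || len(parsed.GetStmts()) != 1 {
		return "" // incoherent input is rejected outright
	}

	orig := parsed.GetStmts()[0].GetStmt().GetAlterRoleStmt().GetOptions()
	next := make([]*pg_query.Node, 0, len(orig))
	for _, option := range orig {
		if !strings.EqualFold(option.GetDefElem().GetDefname(), "password") {
			next = append(next, option)
		}
	}
	if len(next) == 0 {
		return ""
	}
	parsed.GetStmts()[0].GetStmt().GetAlterRoleStmt().Options = next

	sql, _ := pg_query.Deparse(parsed)
	return strings.TrimPrefix(sql, prefix)
}

func main() {
	fmt.Println(sanitize("login password 'doot' --")) // LOGIN
}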
+ if feature.Enabled(ctx, feature.AutoCreateUserSchema) { + autoCreateUserSchemaAnnotationValue, annotationExists := cluster.Annotations[naming.AutoCreateUserSchemaAnnotation] + if annotationExists && strings.EqualFold(autoCreateUserSchemaAnnotationValue, "true") { + log.V(1).Info("Writing schemas for users.") + err = WriteUsersSchemasInPostgreSQL(ctx, exec, users) + } + } + + return err +} + +// WriteUsersSchemasInPostgreSQL will create a schema for each user in each database that user has access to +func WriteUsersSchemasInPostgreSQL(ctx context.Context, exec Executor, + users []v1beta1.PostgresUserSpec) error { + + log := logging.FromContext(ctx) + + var err error + var stdout string + var stderr string + + for i := range users { + spec := users[i] + + // We skip if the user has the name of a reserved schema + if RESERVED_SCHEMA_NAMES[string(spec.Name)] { + log.V(1).Info("Skipping schema creation for user with reserved name", + "name", string(spec.Name)) + continue + } + + // We skip if the user has no databases + if len(spec.Databases) == 0 { + continue + } + + var sql bytes.Buffer + + // Prevent unexpected dereferences by emptying "search_path". The "pg_catalog" + // schema is still searched, and only temporary objects can be created. + // - https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-SEARCH-PATH + _, _ = sql.WriteString(`SET search_path TO '';`) + + _, _ = sql.WriteString(`SELECT * FROM json_array_elements_text(:'databases');`) + + databases, _ := json.Marshal(spec.Databases) + + stdout, stderr, err = exec.ExecInDatabasesFromQuery(ctx, + sql.String(), + strings.Join([]string{ + // Quiet NOTICE messages from IF EXISTS statements. + // - https://www.postgresql.org/docs/current/runtime-config-client.html + `SET client_min_messages = WARNING;`, + + // Creates a schema named after and owned by the user + // - https://www.postgresql.org/docs/current/ddl-schemas.html + // - https://www.postgresql.org/docs/current/sql-createschema.html + + // We create a schema named after the user because + // the PG search_path does not need to be updated, + // since search_path defaults to "$user", public. + // - https://www.postgresql.org/docs/current/ddl-schemas.html#DDL-SCHEMAS-PATH + `CREATE SCHEMA IF NOT EXISTS :"username" AUTHORIZATION :"username";`, + }, "\n"), + map[string]string{ + "databases": string(databases), + "username": string(spec.Name), + + "ON_ERROR_STOP": "on", // Abort when any one statement fails. + "QUIET": "on", // Do not print successful commands to stdout. + }, + ) + + log.V(1).Info("wrote PostgreSQL schemas", "stdout", stdout, "stderr", stderr) + } return err } diff --git a/internal/postgres/users_test.go b/internal/postgres/users_test.go index 10dda0a5bf..141175c78e 100644 --- a/internal/postgres/users_test.go +++ b/internal/postgres/users_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
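Before issuing any SQL, WriteUsersSchemasInPostgreSQL above passes over two classes of users: names that collide with reserved schemas and users with no databases. That gate, extracted with local stand-in types:

package main

import "fmt"

// reservedSchemaNames mirrors RESERVED_SCHEMA_NAMES above.
var reservedSchemaNames = map[string]bool{
	"public": true, "pgbouncer": true, "monitor": true,
}

type userSpec struct {
	Name      string
	Databases []string
}

// shouldCreateSchema mirrors the skip conditions above: reserved names
// and users with zero databases never reach CREATE SCHEMA.
func shouldCreateSchema(u userSpec) bool {
	return !reservedSchemaNames[u.Name] && len(u.Databases) > 0
}

func main() {
	for _, u := range []userSpec{
		{Name: "app", Databases: []string{"db1"}},
		{Name: "public", Databases: []string{"db3"}},
		{Name: "no-databases"},
	} {
		fmt.Println(u.Name, "=>", shouldCreateSchema(u))
	}
}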
+// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -19,6 +8,7 @@ import ( "context" "errors" "io" + "regexp" "strings" "testing" @@ -28,6 +18,24 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +func TestSanitizeAlterRoleOptions(t *testing.T) { + assert.Equal(t, sanitizeAlterRoleOptions(""), "") + assert.Equal(t, sanitizeAlterRoleOptions(" login other stuff"), "", + "expected non-options to be removed") + + t.Run("RemovesPassword", func(t *testing.T) { + assert.Equal(t, sanitizeAlterRoleOptions("password 'anything'"), "") + assert.Equal(t, sanitizeAlterRoleOptions("password $wild$ dollar quoting $wild$ login"), "LOGIN") + assert.Equal(t, sanitizeAlterRoleOptions(" login password '' replication "), "LOGIN REPLICATION") + }) + + t.Run("RemovesComments", func(t *testing.T) { + assert.Equal(t, sanitizeAlterRoleOptions("login -- asdf"), "LOGIN") + assert.Equal(t, sanitizeAlterRoleOptions("login /*"), "") + assert.Equal(t, sanitizeAlterRoleOptions("login /* createdb */ createrole"), "LOGIN CREATEROLE") + }) +} + func TestWriteUsersInPostgreSQL(t *testing.T) { ctx := context.Background() @@ -41,7 +49,8 @@ func TestWriteUsersInPostgreSQL(t *testing.T) { return expected } - assert.Equal(t, expected, WriteUsersInPostgreSQL(ctx, exec, nil, nil)) + cluster := new(v1beta1.PostgresCluster) + assert.Equal(t, expected, WriteUsersInPostgreSQL(ctx, cluster, exec, nil, nil)) }) t.Run("Empty", func(t *testing.T) { @@ -86,17 +95,19 @@ COMMIT;`)) return nil } - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, nil, nil)) + cluster := new(v1beta1.PostgresCluster) + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, nil, nil)) assert.Equal(t, calls, 1) - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, []v1beta1.PostgresUserSpec{}, nil)) + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, []v1beta1.PostgresUserSpec{}, nil)) assert.Equal(t, calls, 2) - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, nil, map[string]string{})) + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, nil, map[string]string{})) assert.Equal(t, calls, 3) }) t.Run("OptionalFields", func(t *testing.T) { + cluster := new(v1beta1.PostgresCluster) calls := 0 exec := func( _ context.Context, stdin io.Reader, _, _ io.Writer, command ...string, @@ -108,14 +119,15 @@ COMMIT;`)) assert.Assert(t, cmp.Contains(string(b), ` \copy input (data) from stdin with (format text) {"databases":["db1"],"options":"","username":"user-no-options","verifier":""} -{"databases":null,"options":"some options here","username":"user-no-databases","verifier":""} +{"databases":null,"options":"CREATEDB CREATEROLE","username":"user-no-databases","verifier":""} {"databases":null,"options":"","username":"user-with-verifier","verifier":"some$verifier"} +{"databases":null,"options":"LOGIN","username":"user-invalid-options","verifier":""} \. 
`)) return nil } - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, []v1beta1.PostgresUserSpec{ { Name: "user-no-options", @@ -123,11 +135,15 @@ COMMIT;`)) }, { Name: "user-no-databases", - Options: "some options here", + Options: "createdb createrole", }, { Name: "user-with-verifier", }, + { + Name: "user-invalid-options", + Options: "login password 'doot' --", + }, }, map[string]string{ "no-user": "ignored", @@ -139,6 +155,7 @@ COMMIT;`)) t.Run("PostgresSuperuser", func(t *testing.T) { calls := 0 + cluster := new(v1beta1.PostgresCluster) exec := func( _ context.Context, stdin io.Reader, _, _ io.Writer, command ...string, ) error { @@ -154,7 +171,7 @@ COMMIT;`)) return nil } - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, []v1beta1.PostgresUserSpec{ { Name: "postgres", @@ -169,3 +186,52 @@ COMMIT;`)) assert.Equal(t, calls, 1) }) } + +func TestWriteUsersSchemasInPostgreSQL(t *testing.T) { + ctx := context.Background() + + t.Run("Mixed users", func(t *testing.T) { + calls := 0 + exec := func( + _ context.Context, stdin io.Reader, _, _ io.Writer, command ...string, + ) error { + calls++ + + b, err := io.ReadAll(stdin) + assert.NilError(t, err) + + // The command strings will contain either of two possibilities, depending on the user called. + commands := strings.Join(command, ",") + re := regexp.MustCompile("--set=databases=\\[\"db1\"\\],--set=username=user-single-db|--set=databases=\\[\"db1\",\"db2\"\\],--set=username=user-multi-db") + assert.Assert(t, cmp.Regexp(re, commands)) + + assert.Assert(t, cmp.Contains(string(b), `CREATE SCHEMA IF NOT EXISTS :"username" AUTHORIZATION :"username";`)) + return nil + } + + assert.NilError(t, WriteUsersSchemasInPostgreSQL(ctx, exec, + []v1beta1.PostgresUserSpec{ + { + Name: "user-single-db", + Databases: []v1beta1.PostgresIdentifier{"db1"}, + }, + { + Name: "user-no-databases", + }, + { + Name: "user-multi-dbs", + Databases: []v1beta1.PostgresIdentifier{"db1", "db2"}, + }, + { + Name: "public", + Databases: []v1beta1.PostgresIdentifier{"db3"}, + }, + }, + )) + // The spec.users has four elements, but two will be skipped: + // * the user with the reserved name `public` + // * the user with 0 databases + assert.Equal(t, calls, 2) + }) + +} diff --git a/internal/postgres/wal.md b/internal/postgres/wal.md index bb8097e4d2..afb094c20e 100644 --- a/internal/postgres/wal.md +++ b/internal/postgres/wal.md @@ -1,16 +1,7 @@ PostgreSQL commits transactions by storing changes in its [write-ahead log][WAL]. diff --git a/internal/registration/interface.go b/internal/registration/interface.go new file mode 100644 index 0000000000..578a064e2b --- /dev/null +++ b/internal/registration/interface.go @@ -0,0 +1,67 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package registration + +import ( + "fmt" + "os" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +type Registration interface { + // Required returns true when registration is required but the token is missing or invalid. 
+ Required(record.EventRecorder, client.Object, *[]metav1.Condition) bool +} + +var URL = os.Getenv("REGISTRATION_URL") + +func SetAdvanceWarning(recorder record.EventRecorder, object client.Object, conditions *[]metav1.Condition) { + recorder.Eventf(object, corev1.EventTypeWarning, "Register Soon", + "Crunchy Postgres for Kubernetes requires registration for upgrades."+ + " Register now to be ready for your next upgrade. See %s for details.", URL) + + meta.SetStatusCondition(conditions, metav1.Condition{ + Type: v1beta1.Registered, + Status: metav1.ConditionFalse, + Reason: "TokenRequired", + Message: fmt.Sprintf( + "Crunchy Postgres for Kubernetes requires registration for upgrades."+ + " Register now to be ready for your next upgrade. See %s for details.", URL), + ObservedGeneration: object.GetGeneration(), + }) +} + +func SetRequiredWarning(recorder record.EventRecorder, object client.Object, conditions *[]metav1.Condition) { + recorder.Eventf(object, corev1.EventTypeWarning, "Registration Required", + "Crunchy Postgres for Kubernetes requires registration for upgrades."+ + " Register now to be ready for your next upgrade. See %s for details.", URL) + + meta.SetStatusCondition(conditions, metav1.Condition{ + Type: v1beta1.Registered, + Status: metav1.ConditionFalse, + Reason: "TokenRequired", + Message: fmt.Sprintf( + "Crunchy Postgres for Kubernetes requires registration for upgrades."+ + " Upgrade suspended. See %s for details.", URL), + ObservedGeneration: object.GetGeneration(), + }) +} + +func emitFailedWarning(recorder record.EventRecorder, object client.Object) { + recorder.Eventf(object, corev1.EventTypeWarning, "Token Authentication Failed", + "See %s for details.", URL) +} + +func emitVerifiedEvent(recorder record.EventRecorder, object client.Object) { + recorder.Event(object, corev1.EventTypeNormal, "Token Verified", + "Thank you for registering your installation of Crunchy Postgres for Kubernetes.") +} diff --git a/internal/registration/runner.go b/internal/registration/runner.go new file mode 100644 index 0000000000..0d607e1e94 --- /dev/null +++ b/internal/registration/runner.go @@ -0,0 +1,191 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package registration + +import ( + "context" + "crypto/rsa" + "errors" + "os" + "strings" + "sync" + "time" + + "github.com/golang-jwt/jwt/v5" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// Runner implements [Registration] by loading and validating the token at a +// fixed path. Its methods are safe to call concurrently. +type Runner struct { + changed func() + enabled bool + publicKey *rsa.PublicKey + refresh time.Duration + tokenPath string + + token struct { + sync.RWMutex + Exists bool `json:"-"` + + jwt.RegisteredClaims + Iteration int `json:"itr"` + } +} + +// Runner implements [Registration] and [manager.Runnable]. +var _ Registration = (*Runner)(nil) +var _ manager.Runnable = (*Runner)(nil) + +// NewRunner creates a [Runner] that periodically checks the validity of the +// token at tokenPath. It calls changed when the validity of the token changes. 
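Both warning helpers above record an event and then call meta.SetStatusCondition, which upserts by condition Type; repeated warnings therefore update a single Registered condition instead of accumulating duplicates. A minimal demonstration, with illustrative reasons and messages:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var conditions []metav1.Condition

	// First write creates the condition.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:    "Registered",
		Status:  metav1.ConditionFalse,
		Reason:  "TokenRequired",
		Message: "registration required",
	})

	// Second write with the same Type replaces it in place.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:    "Registered",
		Status:  metav1.ConditionTrue,
		Reason:  "TokenVerified",
		Message: "registered",
	})

	fmt.Println(len(conditions), conditions[0].Status) // 1 True
}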
+func NewRunner(publicKey, tokenPath string, changed func()) (*Runner, error) { + runner := &Runner{ + changed: changed, + refresh: time.Minute, + tokenPath: tokenPath, + } + + var err error + switch { + case publicKey != "" && tokenPath != "": + if !strings.HasPrefix(strings.TrimSpace(publicKey), "-") { + publicKey = "-----BEGIN -----\n" + publicKey + "\n-----END -----" + } + + runner.enabled = true + runner.publicKey, err = jwt.ParseRSAPublicKeyFromPEM([]byte(publicKey)) + + case publicKey == "" && tokenPath != "": + err = errors.New("registration: missing public key") + + case publicKey != "" && tokenPath == "": + err = errors.New("registration: missing token path") + } + + return runner, err +} + +// CheckToken loads and verifies the configured token, returning an error when +// the file exists but cannot be verified, and +// returning the token if it can be verified. +// NOTE(upgradecheck): return the token/nil so that we can use the token +// in upgradecheck; currently a refresh of the token will cause a restart of the pod +// meaning that the token used in upgradecheck is always the current token. +// But if the restart behavior changes, we might drop the token return in main.go +// and change upgradecheck to retrieve the token itself +func (r *Runner) CheckToken() (*jwt.Token, error) { + data, errFile := os.ReadFile(r.tokenPath) + key := func(*jwt.Token) (any, error) { return r.publicKey, nil } + + // Assume [jwt] and [os] functions could do something unexpected; use defer + // to safely write to the token. + r.token.Lock() + defer r.token.Unlock() + + token, errToken := jwt.ParseWithClaims(string(data), &r.token, key, + jwt.WithExpirationRequired(), + jwt.WithValidMethods([]string{"RS256"}), + ) + + // The error from [os.ReadFile] indicates whether a token file exists. + r.token.Exists = !os.IsNotExist(errFile) + + // Reset most claims if there is any problem loading, parsing, validating, or + // verifying the token file. + if errFile != nil || errToken != nil { + r.token.RegisteredClaims = jwt.RegisteredClaims{} + } + + switch { + case !r.enabled || !r.token.Exists: + return nil, nil + case errFile != nil: + return nil, errFile + default: + return token, errToken + } +} + +func (r *Runner) state() (failed, required bool) { + // Assume [time] functions could do something unexpected; use defer to safely + // read the token. + r.token.RLock() + defer r.token.RUnlock() + + failed = r.token.Exists && r.token.ExpiresAt == nil + required = r.enabled && + (!r.token.Exists || failed || r.token.ExpiresAt.Before(time.Now())) + return +} + +// Required returns true when registration is required but the token is missing or invalid. +func (r *Runner) Required( + recorder record.EventRecorder, object client.Object, conditions *[]metav1.Condition, +) bool { + failed, required := r.state() + + if r.enabled && failed { + emitFailedWarning(recorder, object) + } + + if !required && conditions != nil { + before := len(*conditions) + meta.RemoveStatusCondition(conditions, v1beta1.Registered) + meta.RemoveStatusCondition(conditions, "RegistrationRequired") + meta.RemoveStatusCondition(conditions, "TokenRequired") + found := len(*conditions) != before + + if r.enabled && found { + emitVerifiedEvent(recorder, object) + } + } + + return required +} + +// NeedLeaderElection returns true so that r runs only on the single +// [manager.Manager] that is elected leader in the Kubernetes namespace. 
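CheckToken above pins down two parser properties: an exp claim is mandatory and only RS256 is accepted, which forecloses the RSA-public-key-as-HMAC-secret confusion that the WrongAlgorithm test below exercises. The same options in isolation:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	key, _ := rsa.GenerateKey(rand.Reader, 2048)

	// Sign a token that carries the required expiration claim.
	signed, _ := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
		"exp": jwt.NewNumericDate(time.Now().Add(time.Hour)),
	}).SignedString(key)

	// Parse with the same options CheckToken uses: exp is required and
	// any algorithm other than RS256 is rejected before verification.
	token, err := jwt.Parse(signed,
		func(*jwt.Token) (any, error) { return &key.PublicKey, nil },
		jwt.WithExpirationRequired(),
		jwt.WithValidMethods([]string{"RS256"}),
	)
	fmt.Println(token.Valid, err) // true <nil>
}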
+func (r *Runner) NeedLeaderElection() bool { return true } + +// Start watches for a mounted registration token when enabled. It blocks +// until ctx is cancelled. +func (r *Runner) Start(ctx context.Context) error { + var ticks <-chan time.Time + + if r.enabled { + ticker := time.NewTicker(r.refresh) + defer ticker.Stop() + ticks = ticker.C + } + + log := logging.FromContext(ctx).WithValues("controller", "registration") + + for { + select { + case <-ticks: + _, before := r.state() + if _, err := r.CheckToken(); err != nil { + log.Error(err, "Unable to validate token") + } + if _, after := r.state(); before != after && r.changed != nil { + r.changed() + } + case <-ctx.Done(): + // https://github.com/kubernetes-sigs/controller-runtime/issues/1927 + if errors.Is(ctx.Err(), context.Canceled) { + return nil + } + return ctx.Err() + } + } +} diff --git a/internal/registration/runner_test.go b/internal/registration/runner_test.go new file mode 100644 index 0000000000..8e75848986 --- /dev/null +++ b/internal/registration/runner_test.go @@ -0,0 +1,574 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package registration + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/golang-jwt/jwt/v5" + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/crunchydata/postgres-operator/internal/testing/events" +) + +func TestNewRunner(t *testing.T) { + t.Parallel() + + key, err := rsa.GenerateKey(rand.Reader, 2048) + assert.NilError(t, err) + + der, err := x509.MarshalPKIXPublicKey(&key.PublicKey) + assert.NilError(t, err) + + public := pem.EncodeToMemory(&pem.Block{Bytes: der}) + assert.Assert(t, len(public) != 0) + + t.Run("Disabled", func(t *testing.T) { + runner, err := NewRunner("", "", nil) + assert.NilError(t, err) + assert.Assert(t, runner != nil) + assert.Assert(t, !runner.enabled) + }) + + t.Run("ConfiguredCorrectly", func(t *testing.T) { + runner, err := NewRunner(string(public), "any", nil) + assert.NilError(t, err) + assert.Assert(t, runner != nil) + assert.Assert(t, runner.enabled) + + t.Run("ExtraLines", func(t *testing.T) { + input := "\n\n" + strings.ReplaceAll(string(public), "\n", "\n\n") + "\n\n" + + runner, err := NewRunner(input, "any", nil) + assert.NilError(t, err) + assert.Assert(t, runner != nil) + assert.Assert(t, runner.enabled) + }) + + t.Run("WithoutPEMBoundaries", func(t *testing.T) { + lines := strings.Split(strings.TrimSpace(string(public)), "\n") + lines = lines[1 : len(lines)-1] + + for _, input := range []string{ + strings.Join(lines, ""), // single line + strings.Join(lines, "\n"), // multi-line + "\n\n" + strings.Join(lines, "\n\n") + "\n\n", // extra lines + } { + runner, err := NewRunner(input, "any", nil) + assert.NilError(t, err) + assert.Assert(t, runner != nil) + assert.Assert(t, runner.enabled) + } + }) + }) + + t.Run("ConfiguredIncorrectly", func(t *testing.T) { + for _, tt := range []struct { + key, path, msg string + }{ + {msg: "public key", key: "", path: "any"}, + {msg: "token path", key: "bad", path: ""}, + {msg: "invalid key", key: "bad", path: "any"}, + {msg: "token path", key: string(public), path: ""}, + } { + _, err := NewRunner(tt.key, tt.path, nil) + assert.ErrorContains(t, err, tt.msg, "(key=%q, path=%q)", tt.key, tt.path) + } + }) +} + 
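Start above follows a common runnable shape: poll on a ticker, compare state before and after each check, fire the callback only on a transition, and treat context cancellation as a clean shutdown. A stripped-down version of that control flow:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// run polls check on every tick and invokes changed only when the
// observed state differs from the last observation, mirroring the
// before/after comparison in Runner.Start above.
func run(ctx context.Context, refresh time.Duration, check func() bool, changed func()) error {
	ticker := time.NewTicker(refresh)
	defer ticker.Stop()

	last := check()
	for {
		select {
		case <-ticker.C:
			if now := check(); now != last {
				last = now
				changed()
			}
		case <-ctx.Done():
			// Cancellation is a clean shutdown; only other errors propagate.
			if errors.Is(ctx.Err(), context.Canceled) {
				return nil
			}
			return ctx.Err()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 55*time.Millisecond)
	defer cancel()

	on := false
	err := run(ctx, 10*time.Millisecond,
		func() bool { on = !on; return on },
		func() { fmt.Println("state changed") })
	fmt.Println(err) // context deadline exceeded
}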
+func TestRunnerCheckToken(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + key, err := rsa.GenerateKey(rand.Reader, 2048) + assert.NilError(t, err) + + t.Run("SafeToCallDisabled", func(t *testing.T) { + r := Runner{enabled: false} + _, err := r.CheckToken() + assert.NilError(t, err) + }) + + t.Run("FileMissing", func(t *testing.T) { + r := Runner{enabled: true, tokenPath: filepath.Join(dir, "nope")} + _, err := r.CheckToken() + assert.NilError(t, err) + }) + + t.Run("FileUnreadable", func(t *testing.T) { + r := Runner{enabled: true, tokenPath: filepath.Join(dir, "nope")} + assert.NilError(t, os.WriteFile(r.tokenPath, nil, 0o200)) // Writeable + + _, err := r.CheckToken() + assert.ErrorContains(t, err, "permission") + assert.Assert(t, r.token.ExpiresAt == nil) + }) + + t.Run("FileEmpty", func(t *testing.T) { + r := Runner{enabled: true, tokenPath: filepath.Join(dir, "empty")} + assert.NilError(t, os.WriteFile(r.tokenPath, nil, 0o400)) // Readable + + _, err := r.CheckToken() + assert.ErrorContains(t, err, "malformed") + assert.Assert(t, r.token.ExpiresAt == nil) + }) + + t.Run("WrongAlgorithm", func(t *testing.T) { + r := Runner{ + enabled: true, + publicKey: &key.PublicKey, + tokenPath: filepath.Join(dir, "hs256"), + } + + // Maliciously treating an RSA public key as an HMAC secret. + // - https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ + public, err := x509.MarshalPKIXPublicKey(r.publicKey) + assert.NilError(t, err) + data, err := jwt.New(jwt.SigningMethodHS256).SignedString(public) + assert.NilError(t, err) + assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable + + _, err = r.CheckToken() + assert.Assert(t, err != nil, "HMAC algorithm should be rejected") + assert.Assert(t, r.token.ExpiresAt == nil) + }) + + t.Run("MissingExpiration", func(t *testing.T) { + r := Runner{ + enabled: true, + publicKey: &key.PublicKey, + tokenPath: filepath.Join(dir, "no-claims"), + } + + data, err := jwt.New(jwt.SigningMethodRS256).SignedString(key) + assert.NilError(t, err) + assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable + + _, err = r.CheckToken() + assert.ErrorContains(t, err, "exp claim is required") + assert.Assert(t, r.token.ExpiresAt == nil) + }) + + t.Run("ExpiredToken", func(t *testing.T) { + r := Runner{ + enabled: true, + publicKey: &key.PublicKey, + tokenPath: filepath.Join(dir, "expired"), + } + + data, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "exp": jwt.NewNumericDate(time.Date(2020, 1, 1, 1, 1, 1, 1, time.UTC)), + }).SignedString(key) + assert.NilError(t, err) + assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable + + _, err = r.CheckToken() + assert.ErrorContains(t, err, "is expired") + assert.Assert(t, r.token.ExpiresAt == nil) + }) + + t.Run("ValidToken", func(t *testing.T) { + r := Runner{ + enabled: true, + publicKey: &key.PublicKey, + tokenPath: filepath.Join(dir, "valid"), + } + + expiration := jwt.NewNumericDate(time.Now().Add(time.Hour)) + data, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "exp": expiration, + }).SignedString(key) + assert.NilError(t, err) + assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable + + token, err := r.CheckToken() + assert.NilError(t, err) + assert.Assert(t, r.token.ExpiresAt != nil) + assert.Assert(t, token.Valid) + exp, err := token.Claims.GetExpirationTime() + assert.NilError(t, err) + assert.Equal(t, exp.Time, expiration.Time) + }) +} + +func 
TestRunnerLeaderElectionRunnable(t *testing.T) { + var runner manager.LeaderElectionRunnable = &Runner{} + + assert.Assert(t, runner.NeedLeaderElection()) +} + +func TestRunnerRequiredConditions(t *testing.T) { + t.Parallel() + + t.Run("RegistrationDisabled", func(t *testing.T) { + r := Runner{enabled: false} + + for _, tt := range []struct { + before, after []metav1.Condition + }{ + { + before: []metav1.Condition{}, + after: []metav1.Condition{}, + }, + { + before: []metav1.Condition{{Type: "ExistingOther"}}, + after: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + after: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{ + {Type: "Registered"}, + {Type: "ExistingOther"}, + {Type: "RegistrationRequired"}, + }, + after: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "TokenRequired"}}, + after: []metav1.Condition{}, + }, + } { + for _, exists := range []bool{false, true} { + for _, expires := range []time.Time{ + time.Now().Add(time.Hour), + time.Now().Add(-time.Hour), + } { + r.token.Exists = exists + r.token.ExpiresAt = jwt.NewNumericDate(expires) + + conditions := append([]metav1.Condition{}, tt.before...) + discard := new(events.Recorder) + object := &corev1.ConfigMap{} + + result := r.Required(discard, object, &conditions) + + assert.Equal(t, result, false, "expected registration not required") + assert.DeepEqual(t, conditions, tt.after) + } + } + } + }) + + t.Run("RegistrationRequired", func(t *testing.T) { + r := Runner{enabled: true} + + for _, tt := range []struct { + exists bool + expires time.Time + before []metav1.Condition + }{ + { + exists: false, expires: time.Now().Add(time.Hour), + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + { + exists: false, expires: time.Now().Add(-time.Hour), + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + { + exists: true, expires: time.Now().Add(-time.Hour), + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + } { + r.token.Exists = tt.exists + r.token.ExpiresAt = jwt.NewNumericDate(tt.expires) + + conditions := append([]metav1.Condition{}, tt.before...) + discard := new(events.Recorder) + object := &corev1.ConfigMap{} + + result := r.Required(discard, object, &conditions) + + assert.Equal(t, result, true, "expected registration required") + assert.DeepEqual(t, conditions, tt.before) + } + }) + + t.Run("Registered", func(t *testing.T) { + r := Runner{} + r.token.Exists = true + r.token.ExpiresAt = jwt.NewNumericDate(time.Now().Add(time.Hour)) + + for _, tt := range []struct { + before, after []metav1.Condition + }{ + { + before: []metav1.Condition{}, + after: []metav1.Condition{}, + }, + { + before: []metav1.Condition{{Type: "ExistingOther"}}, + after: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + after: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{ + {Type: "Registered"}, + {Type: "ExistingOther"}, + {Type: "RegistrationRequired"}, + }, + after: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "TokenRequired"}}, + after: []metav1.Condition{}, + }, + } { + for _, enabled := range []bool{false, true} { + r.enabled = enabled + + conditions := append([]metav1.Condition{}, tt.before...) 
+ discard := new(events.Recorder) + object := &corev1.ConfigMap{} + + result := r.Required(discard, object, &conditions) + + assert.Equal(t, result, false, "expected registration not required") + assert.DeepEqual(t, conditions, tt.after) + } + } + }) +} + +func TestRunnerRequiredEvents(t *testing.T) { + t.Parallel() + + t.Run("RegistrationDisabled", func(t *testing.T) { + r := Runner{enabled: false} + + for _, tt := range []struct { + before []metav1.Condition + }{ + { + before: []metav1.Condition{}, + }, + { + before: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + } { + for _, exists := range []bool{false, true} { + for _, expires := range []time.Time{ + time.Now().Add(time.Hour), + time.Now().Add(-time.Hour), + } { + r.token.Exists = exists + r.token.ExpiresAt = jwt.NewNumericDate(expires) + + conditions := append([]metav1.Condition{}, tt.before...) + object := &corev1.ConfigMap{} + recorder := events.NewRecorder(t, scheme.Scheme) + + result := r.Required(recorder, object, &conditions) + + assert.Equal(t, result, false, "expected registration not required") + assert.Equal(t, len(recorder.Events), 0, "expected no events") + } + } + } + }) + + t.Run("RegistrationRequired", func(t *testing.T) { + r := Runner{enabled: true} + + t.Run("MissingToken", func(t *testing.T) { + r.token.Exists = false + + for _, tt := range []struct { + before []metav1.Condition + }{ + { + before: []metav1.Condition{}, + }, + { + before: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + } { + conditions := append([]metav1.Condition{}, tt.before...) + object := &corev1.ConfigMap{} + recorder := events.NewRecorder(t, scheme.Scheme) + + result := r.Required(recorder, object, &conditions) + + assert.Equal(t, result, true, "expected registration required") + assert.Equal(t, len(recorder.Events), 0, "expected no events") + } + }) + + t.Run("InvalidToken", func(t *testing.T) { + r.token.Exists = true + r.token.ExpiresAt = nil + + for _, tt := range []struct { + before []metav1.Condition + }{ + { + before: []metav1.Condition{}, + }, + { + before: []metav1.Condition{{Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + } { + conditions := append([]metav1.Condition{}, tt.before...) + object := &corev1.ConfigMap{} + recorder := events.NewRecorder(t, scheme.Scheme) + + result := r.Required(recorder, object, &conditions) + + assert.Equal(t, result, true, "expected registration required") + assert.Equal(t, len(recorder.Events), 1, "expected one event") + assert.Equal(t, recorder.Events[0].Type, "Warning") + assert.Equal(t, recorder.Events[0].Reason, "Token Authentication Failed") + } + }) + }) + + t.Run("Registered", func(t *testing.T) { + r := Runner{} + r.token.Exists = true + r.token.ExpiresAt = jwt.NewNumericDate(time.Now().Add(time.Hour)) + + t.Run("AlwaysRegistered", func(t *testing.T) { + // No prior registration conditions + for _, tt := range []struct { + before []metav1.Condition + }{ + { + before: []metav1.Condition{}, + }, + { + before: []metav1.Condition{{Type: "ExistingOther"}}, + }, + } { + for _, enabled := range []bool{false, true} { + r.enabled = enabled + + conditions := append([]metav1.Condition{}, tt.before...) 
+ object := &corev1.ConfigMap{} + recorder := events.NewRecorder(t, scheme.Scheme) + + result := r.Required(recorder, object, &conditions) + + assert.Equal(t, result, false, "expected registration not required") + assert.Equal(t, len(recorder.Events), 0, "expected no events") + } + } + }) + + t.Run("PreviouslyUnregistered", func(t *testing.T) { + r.enabled = true + + // One or more prior registration conditions + for _, tt := range []struct { + before []metav1.Condition + }{ + { + before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, + }, + { + before: []metav1.Condition{ + {Type: "Registered"}, + {Type: "ExistingOther"}, + {Type: "RegistrationRequired"}, + }, + }, + { + before: []metav1.Condition{{Type: "TokenRequired"}}, + }, + } { + conditions := append([]metav1.Condition{}, tt.before...) + object := &corev1.ConfigMap{} + recorder := events.NewRecorder(t, scheme.Scheme) + + result := r.Required(recorder, object, &conditions) + + assert.Equal(t, result, false, "expected registration not required") + assert.Equal(t, len(recorder.Events), 1, "expected one event") + assert.Equal(t, recorder.Events[0].Type, "Normal") + assert.Equal(t, recorder.Events[0].Reason, "Token Verified") + } + }) + }) +} + +func TestRunnerStart(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + key, err := rsa.GenerateKey(rand.Reader, 2048) + assert.NilError(t, err) + + token, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "exp": jwt.NewNumericDate(time.Now().Add(time.Hour)), + }).SignedString(key) + assert.NilError(t, err) + + t.Run("DisabledDoesNothing", func(t *testing.T) { + runner := &Runner{ + enabled: false, + refresh: time.Nanosecond, + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + + assert.ErrorIs(t, runner.Start(ctx), context.DeadlineExceeded, + "expected it to block until context is canceled") + }) + + t.Run("WithCallback", func(t *testing.T) { + called := false + runner := &Runner{ + changed: func() { called = true }, + enabled: true, + publicKey: &key.PublicKey, + refresh: time.Second, + tokenPath: filepath.Join(dir, "token"), + } + + // Begin with an invalid token. + assert.NilError(t, os.WriteFile(runner.tokenPath, nil, 0o600)) + _, err = runner.CheckToken() + assert.Assert(t, err != nil) + + // Replace it with a valid token. + assert.NilError(t, os.WriteFile(runner.tokenPath, []byte(token), 0o600)) + + // Run with a timeout that exceeds the refresh interval. + ctx, cancel := context.WithTimeout(context.Background(), runner.refresh*3/2) + defer cancel() + + assert.ErrorIs(t, runner.Start(ctx), context.DeadlineExceeded) + assert.Assert(t, called, "expected a call back") + }) +} diff --git a/internal/registration/testing.go b/internal/registration/testing.go new file mode 100644 index 0000000000..1418f6d2d3 --- /dev/null +++ b/internal/registration/testing.go @@ -0,0 +1,21 @@ +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package registration + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// NOTE: This type can go away following https://go.dev/issue/47487. 
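+//
+// A minimal usage sketch (illustrative only; the `stub` name is not part of this change):
+//
+//	stub := RegistrationFunc(func(record.EventRecorder, client.Object, *[]metav1.Condition) bool {
+//		return false // report that registration is not required
+//	})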
+ +type RegistrationFunc func(record.EventRecorder, client.Object, *[]metav1.Condition) bool + +func (fn RegistrationFunc) Required(rec record.EventRecorder, obj client.Object, conds *[]metav1.Condition) bool { + return fn(rec, obj, conds) +} + +var _ Registration = RegistrationFunc(nil) diff --git a/internal/testing/cmp/cmp.go b/internal/testing/cmp/cmp.go index 3a723794db..265a598064 100644 --- a/internal/testing/cmp/cmp.go +++ b/internal/testing/cmp/cmp.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package cmp diff --git a/internal/testing/events/recorder.go b/internal/testing/events/recorder.go index 610141f570..23c03a4c40 100644 --- a/internal/testing/events/recorder.go +++ b/internal/testing/events/recorder.go @@ -1,17 +1,6 @@ -/* - Copyright 2022 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package events diff --git a/internal/testing/require/exec.go b/internal/testing/require/exec.go index ebf817d54a..c182e84996 100644 --- a/internal/testing/require/exec.go +++ b/internal/testing/require/exec.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package require diff --git a/internal/testing/require/kubernetes.go b/internal/testing/require/kubernetes.go new file mode 100644 index 0000000000..df21bca058 --- /dev/null +++ b/internal/testing/require/kubernetes.go @@ -0,0 +1,167 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package require + +import ( + "context" + "os" + "path/filepath" + goruntime "runtime" + "strings" + "sync" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" +) + +// https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants +var envtestVarsSet = os.Getenv("KUBEBUILDER_ASSETS") != "" || + strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") + +// EnvTest returns an unstarted Environment with crds. It calls t.Skip when +// the "KUBEBUILDER_ASSETS" and "USE_EXISTING_CLUSTER" environment variables +// are unset. +func EnvTest(t testing.TB, crds envtest.CRDInstallOptions) *envtest.Environment { + t.Helper() + + if !envtestVarsSet { + t.SkipNow() + } + + return &envtest.Environment{ + CRDInstallOptions: crds, + Scheme: crds.Scheme, + } +} + +var kubernetes struct { + sync.Mutex + + // Count references to the started Environment. + count int + env *envtest.Environment +} + +// Kubernetes starts or connects to a Kubernetes API and returns a client that uses it. +// When starting a local API, the client is a member of the "system:masters" group. +// +// It calls t.Fatal when something fails. It stops the local API using t.Cleanup. +// It calls t.Skip when the "KUBEBUILDER_ASSETS" and "USE_EXISTING_CLUSTER" environment +// variables are unset. +// +// Tests that call t.Parallel might share the same local API. Call t.Parallel after this +// function to ensure they share. +func Kubernetes(t testing.TB) client.Client { + t.Helper() + _, cc := kubernetes3(t) + return cc +} + +// Kubernetes2 is the same as [Kubernetes] but also returns a copy of the client +// configuration. +func Kubernetes2(t testing.TB) (*rest.Config, client.Client) { + t.Helper() + env, cc := kubernetes3(t) + return rest.CopyConfig(env.Config), cc +} + +func kubernetes3(t testing.TB) (*envtest.Environment, client.Client) { + t.Helper() + + if !envtestVarsSet { + t.SkipNow() + } + + frames := func() *goruntime.Frames { + var pcs [5]uintptr + n := goruntime.Callers(2, pcs[:]) + return goruntime.CallersFrames(pcs[0:n]) + }() + + // Calculate the project directory as reported by [goruntime.CallersFrames]. + frame, ok := frames.Next() + self := frame.File + root := strings.TrimSuffix(self, + filepath.Join("internal", "testing", "require", "kubernetes.go")) + + // Find the first caller that is not in this file. + for ok && frame.File == self { + frame, ok = frames.Next() + } + caller := frame.File + + // Calculate the project directory path relative to the caller. 
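+	// For example (illustrative): a caller at <root>/internal/upgradecheck/header_test.go
+	// yields base "../..", so the CRD paths below resolve from any caller's directory.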
+ base, err := filepath.Rel(filepath.Dir(caller), root) + assert.NilError(t, err) + + kubernetes.Lock() + defer kubernetes.Unlock() + + if kubernetes.env == nil { + env := EnvTest(t, envtest.CRDInstallOptions{ + ErrorIfPathMissing: true, + Paths: []string{ + filepath.Join(base, "config", "crd", "bases"), + filepath.Join(base, "hack", "tools", "external-snapshotter", "client", "config", "crd"), + }, + Scheme: runtime.Scheme, + }) + + _, err := env.Start() + assert.NilError(t, err) + + kubernetes.env = env + } + + kubernetes.count++ + + t.Cleanup(func() { + kubernetes.Lock() + defer kubernetes.Unlock() + + kubernetes.count-- + + if kubernetes.count == 0 { + assert.Check(t, kubernetes.env.Stop()) + kubernetes.env = nil + } + }) + + cc, err := client.New(kubernetes.env.Config, client.Options{ + Scheme: kubernetes.env.Scheme, + }) + assert.NilError(t, err) + + return kubernetes.env, cc +} + +// Namespace creates a random namespace that is deleted by t.Cleanup. It calls +// t.Fatal when creation fails. The caller may delete the namespace at any time. +func Namespace(t testing.TB, cc client.Client) *corev1.Namespace { + t.Helper() + + // Remove / that shows up when running a sub-test + // TestSomeThing/test_some_specific_thing + name, _, _ := strings.Cut(t.Name(), "/") + + ns := &corev1.Namespace{} + ns.GenerateName = "postgres-operator-test-" + ns.Labels = map[string]string{"postgres-operator-test": name} + + ctx := context.Background() + assert.NilError(t, cc.Create(ctx, ns)) + + t.Cleanup(func() { + assert.Check(t, client.IgnoreNotFound(cc.Delete(ctx, ns))) + }) + + return ns +} diff --git a/internal/testing/require/parallel.go b/internal/testing/require/parallel.go index c164eb08c8..4fbdf42284 100644 --- a/internal/testing/require/parallel.go +++ b/internal/testing/require/parallel.go @@ -1,17 +1,6 @@ -/* - Copyright 2022 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package require diff --git a/internal/testing/invalid_token b/internal/testing/token_invalid similarity index 100% rename from internal/testing/invalid_token rename to internal/testing/token_invalid diff --git a/cpk_rsa_key.pub b/internal/testing/token_rsa_key.pub similarity index 100% rename from cpk_rsa_key.pub rename to internal/testing/token_rsa_key.pub diff --git a/internal/testing/cpk_token b/internal/testing/token_valid similarity index 100% rename from internal/testing/cpk_token rename to internal/testing/token_valid diff --git a/internal/testing/validation/postgrescluster_test.go b/internal/testing/validation/postgrescluster_test.go new file mode 100644 index 0000000000..e71ff22b2e --- /dev/null +++ b/internal/testing/validation/postgrescluster_test.go @@ -0,0 +1,125 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package validation + +import ( + "context" + "fmt" + "testing" + + "gotest.tools/v3/assert" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestPostgresUserOptions(t *testing.T) { + ctx := context.Background() + cc := require.Kubernetes(t) + t.Parallel() + + namespace := require.Namespace(t, cc) + base := v1beta1.NewPostgresCluster() + + // Start with a bunch of required fields. + assert.NilError(t, yaml.Unmarshal([]byte(`{ + postgresVersion: 16, + backups: { + pgbackrest: { + repos: [{ name: repo1 }], + }, + }, + instances: [{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }], + }`), &base.Spec)) + + base.Namespace = namespace.Name + base.Name = "postgres-user-options" + + assert.NilError(t, cc.Create(ctx, base.DeepCopy(), client.DryRunAll), + "expected this base cluster to be valid") + + // See [internal/controller/postgrescluster.TestValidatePostgresUsers] + + t.Run("NoComments", func(t *testing.T) { + cluster := base.DeepCopy() + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "dashes", Options: "ANY -- comment"}, + {Name: "block-open", Options: "/* asdf"}, + {Name: "block-close", Options: " qw */ rt"}, + } + + err := cc.Create(ctx, cluster, client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "cannot contain comments") + + //nolint:errorlint // This is a test, and a panic is unlikely. + status := err.(apierrors.APIStatus).Status() + assert.Assert(t, status.Details != nil) + assert.Equal(t, len(status.Details.Causes), 3) + + for i, cause := range status.Details.Causes { + assert.Equal(t, cause.Field, fmt.Sprintf("spec.users[%d].options", i)) + assert.Assert(t, cmp.Contains(cause.Message, "cannot contain comments")) + } + }) + + t.Run("NoPassword", func(t *testing.T) { + cluster := base.DeepCopy() + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "uppercase", Options: "SUPERUSER PASSWORD ''"}, + {Name: "lowercase", Options: "password 'asdf'"}, + } + + err := cc.Create(ctx, cluster, client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "cannot assign password") + + //nolint:errorlint // This is a test, and a panic is unlikely. + status := err.(apierrors.APIStatus).Status() + assert.Assert(t, status.Details != nil) + assert.Equal(t, len(status.Details.Causes), 2) + + for i, cause := range status.Details.Causes { + assert.Equal(t, cause.Field, fmt.Sprintf("spec.users[%d].options", i)) + assert.Assert(t, cmp.Contains(cause.Message, "cannot assign password")) + } + }) + + t.Run("NoTerminators", func(t *testing.T) { + cluster := base.DeepCopy() + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "semicolon", Options: "some ;where"}, + } + + err := cc.Create(ctx, cluster, client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "should match") + + //nolint:errorlint // This is a test, and a panic is unlikely. 
+ status := err.(apierrors.APIStatus).Status() + assert.Assert(t, status.Details != nil) + assert.Equal(t, len(status.Details.Causes), 1) + assert.Equal(t, status.Details.Causes[0].Field, "spec.users[0].options") + }) + + t.Run("Valid", func(t *testing.T) { + cluster := base.DeepCopy() + cluster.Spec.Users = []v1beta1.PostgresUserSpec{ + {Name: "normal", Options: "CREATEDB valid until '2006-01-02'"}, + {Name: "very-full", Options: "NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 5"}, + } + + assert.NilError(t, cc.Create(ctx, cluster, client.DryRunAll)) + }) +} diff --git a/internal/upgradecheck/header.go b/internal/upgradecheck/header.go index f6f43916f8..a1d56ef442 100644 --- a/internal/upgradecheck/header.go +++ b/internal/upgradecheck/header.go @@ -1,24 +1,14 @@ -package upgradecheck - -/* - Copyright 2017 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package upgradecheck import ( "context" "encoding/json" "net/http" + "os" googleuuid "github.com/google/uuid" corev1 "k8s.io/api/core/v1" @@ -28,6 +18,8 @@ import ( "k8s.io/client-go/rest" crclient "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -44,24 +36,36 @@ var ( // Extensible struct for client upgrade data type clientUpgradeData struct { - DeploymentID string `json:"deployment_id"` - KubernetesEnv string `json:"kubernetes_env"` - PGOClustersTotal int `json:"pgo_clusters_total"` - PGOVersion string `json:"pgo_version"` - IsOpenShift bool `json:"is_open_shift"` + BridgeClustersTotal int `json:"bridge_clusters_total"` + BuildSource string `json:"build_source"` + DeploymentID string `json:"deployment_id"` + FeatureGatesEnabled string `json:"feature_gates_enabled"` + IsOpenShift bool `json:"is_open_shift"` + KubernetesEnv string `json:"kubernetes_env"` + PGOClustersTotal int `json:"pgo_clusters_total"` + PGOInstaller string `json:"pgo_installer"` + PGOInstallerOrigin string `json:"pgo_installer_origin"` + PGOVersion string `json:"pgo_version"` + RegistrationToken string `json:"registration_token"` } // generateHeader aggregates data and returns a struct of that data // If any errors are encountered, it logs those errors and uses the default values func generateHeader(ctx context.Context, cfg *rest.Config, crClient crclient.Client, - pgoVersion string, isOpenShift bool) *clientUpgradeData { + pgoVersion string, isOpenShift bool, registrationToken string) *clientUpgradeData { return &clientUpgradeData{ - PGOVersion: pgoVersion, - IsOpenShift: isOpenShift, - DeploymentID: ensureDeploymentID(ctx, crClient), - PGOClustersTotal: 
getManagedClusters(ctx, crClient), - KubernetesEnv: getServerVersion(ctx, cfg), + BridgeClustersTotal: getBridgeClusters(ctx, crClient), + BuildSource: os.Getenv("BUILD_SOURCE"), + DeploymentID: ensureDeploymentID(ctx, crClient), + FeatureGatesEnabled: feature.ShowGates(ctx), + IsOpenShift: isOpenShift, + KubernetesEnv: getServerVersion(ctx, cfg), + PGOClustersTotal: getManagedClusters(ctx, crClient), + PGOInstaller: os.Getenv("PGO_INSTALLER"), + PGOInstallerOrigin: os.Getenv("PGO_INSTALLER_ORIGIN"), + PGOVersion: pgoVersion, + RegistrationToken: registrationToken, } } @@ -125,7 +129,7 @@ func manageUpgradeCheckConfigMap(ctx context.Context, crClient crclient.Client, } } - err = applyConfigMap(ctx, crClient, cm, currentID) + err = applyConfigMap(ctx, crClient, cm, postgrescluster.ControllerName) if err != nil { log.V(1).Info("upgrade check issue: could not apply configmap", "response", err.Error()) @@ -169,6 +173,22 @@ func getManagedClusters(ctx context.Context, crClient crclient.Client) int { return count } +// getBridgeClusters returns a count of Bridge clusters managed by this PGO instance +// Any errors encountered will be logged and the count result will be 0 +func getBridgeClusters(ctx context.Context, crClient crclient.Client) int { + var count int + clusters := &v1beta1.CrunchyBridgeClusterList{} + err := crClient.List(ctx, clusters) + if err != nil { + log := logging.FromContext(ctx) + log.V(1).Info("upgrade check issue: could not count bridge clusters", + "response", err.Error()) + } else { + count = len(clusters.Items) + } + return count +} + // getServerVersion returns the stringified server version (i.e., the same info `kubectl version` // returns for the server) // Any errors encountered will be logged and will return an empty string diff --git a/internal/upgradecheck/header_test.go b/internal/upgradecheck/header_test.go index d8630865b8..c144e7629b 100644 --- a/internal/upgradecheck/header_test.go +++ b/internal/upgradecheck/header_test.go @@ -1,28 +1,14 @@ -//go:build envtest -// +build envtest +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package upgradecheck -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - import ( "context" "encoding/json" "net/http" - "path/filepath" + "strings" "testing" "gotest.tools/v3/assert" @@ -33,31 +19,19 @@ import ( // Google Kubernetes Engine / Google Cloud Platform authentication provider _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/client-go/rest" - crclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" - "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestGenerateHeader(t *testing.T) { setupDeploymentID(t) ctx := context.Background() - env := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, - } - cfg, err := env.Start() - assert.NilError(t, err) - t.Cleanup(func() { assert.Check(t, env.Stop()) }) - - pgoScheme, err := runtime.CreatePostgresOperatorScheme() - assert.NilError(t, err) - cc, err := crclient.New(cfg, crclient.Options{Scheme: pgoScheme}) - assert.NilError(t, err) - + cfg, cc := require.Kubernetes2(t) setupNamespace(t, cc) dc, err := discovery.NewDiscoveryClientForConfig(cfg) @@ -67,6 +41,10 @@ func TestGenerateHeader(t *testing.T) { reconciler := postgrescluster.Reconciler{Client: cc} + t.Setenv("PGO_INSTALLER", "test") + t.Setenv("PGO_INSTALLER_ORIGIN", "test-origin") + t.Setenv("BUILD_SOURCE", "developer") + t.Run("error ensuring ID", func(t *testing.T) { fakeClientWithOptionalError := &fakeClientWithError{ cc, "patch error", @@ -74,7 +52,7 @@ func TestGenerateHeader(t *testing.T) { ctx, calls := setupLogCapture(ctx) res := generateHeader(ctx, cfg, fakeClientWithOptionalError, - "1.2.3", reconciler.IsOpenShift) + "1.2.3", reconciler.IsOpenShift, "") assert.Equal(t, len(*calls), 1) assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not apply configmap`)) assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) @@ -83,8 +61,15 @@ func TestGenerateHeader(t *testing.T) { err := cc.List(ctx, &pgoList) assert.NilError(t, err) assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) + bridgeList := v1beta1.CrunchyBridgeClusterList{} + err = cc.List(ctx, &bridgeList) + assert.NilError(t, err) + assert.Equal(t, len(bridgeList.Items), res.BridgeClustersTotal) assert.Equal(t, "1.2.3", res.PGOVersion) assert.Equal(t, server.String(), res.KubernetesEnv) + assert.Equal(t, "test", res.PGOInstaller) + assert.Equal(t, "test-origin", res.PGOInstallerOrigin) + assert.Equal(t, "developer", res.BuildSource) }) t.Run("error getting cluster count", func(t *testing.T) { @@ -94,14 +79,21 @@ func TestGenerateHeader(t *testing.T) { ctx, calls := setupLogCapture(ctx) res := generateHeader(ctx, cfg, fakeClientWithOptionalError, - "1.2.3", reconciler.IsOpenShift) - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not count postgres clusters`)) + "1.2.3", reconciler.IsOpenShift, "") + assert.Equal(t, len(*calls), 2) + // Aggregating the logs since we cannot determine which call will be first + callsAggregate := strings.Join(*calls, " ") + assert.Assert(t, cmp.Contains(callsAggregate, `upgrade check issue: could not count postgres 
clusters`)) + assert.Assert(t, cmp.Contains(callsAggregate, `upgrade check issue: could not count bridge clusters`)) assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) assert.Equal(t, deploymentID, res.DeploymentID) assert.Equal(t, 0, res.PGOClustersTotal) + assert.Equal(t, 0, res.BridgeClustersTotal) assert.Equal(t, "1.2.3", res.PGOVersion) assert.Equal(t, server.String(), res.KubernetesEnv) + assert.Equal(t, "test", res.PGOInstaller) + assert.Equal(t, "test-origin", res.PGOInstallerOrigin) + assert.Equal(t, "developer", res.BuildSource) }) t.Run("error getting server version info", func(t *testing.T) { @@ -109,7 +101,7 @@ func TestGenerateHeader(t *testing.T) { badcfg := &rest.Config{} res := generateHeader(ctx, badcfg, cc, - "1.2.3", reconciler.IsOpenShift) + "1.2.3", reconciler.IsOpenShift, "") assert.Equal(t, len(*calls), 1) assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not retrieve server version`)) assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) @@ -120,13 +112,21 @@ func TestGenerateHeader(t *testing.T) { assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) assert.Equal(t, "1.2.3", res.PGOVersion) assert.Equal(t, "", res.KubernetesEnv) + assert.Equal(t, "test", res.PGOInstaller) + assert.Equal(t, "test-origin", res.PGOInstallerOrigin) + assert.Equal(t, "developer", res.BuildSource) }) t.Run("success", func(t *testing.T) { ctx, calls := setupLogCapture(ctx) + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx = feature.NewContext(ctx, gate) res := generateHeader(ctx, cfg, cc, - "1.2.3", reconciler.IsOpenShift) + "1.2.3", reconciler.IsOpenShift, "") assert.Equal(t, len(*calls), 0) assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) assert.Equal(t, deploymentID, res.DeploymentID) @@ -136,19 +136,16 @@ func TestGenerateHeader(t *testing.T) { assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) assert.Equal(t, "1.2.3", res.PGOVersion) assert.Equal(t, server.String(), res.KubernetesEnv) + assert.Equal(t, "TablespaceVolumes=true", res.FeatureGatesEnabled) + assert.Equal(t, "test", res.PGOInstaller) + assert.Equal(t, "test-origin", res.PGOInstallerOrigin) + assert.Equal(t, "developer", res.BuildSource) }) } func TestEnsureID(t *testing.T) { ctx := context.Background() - env := &envtest.Environment{} - config, err := env.Start() - assert.NilError(t, err) - t.Cleanup(func() { assert.Check(t, env.Stop()) }) - - cc, err := crclient.New(config, crclient.Options{}) - assert.NilError(t, err) - + cc := require.Kubernetes(t) setupNamespace(t, cc) t.Run("success, no id set in mem or configmap", func(t *testing.T) { @@ -284,14 +281,7 @@ func TestEnsureID(t *testing.T) { func TestManageUpgradeCheckConfigMap(t *testing.T) { ctx := context.Background() - env := &envtest.Environment{} - config, err := env.Start() - assert.NilError(t, err) - t.Cleanup(func() { assert.Check(t, env.Stop()) }) - - cc, err := crclient.New(config, crclient.Options{}) - assert.NilError(t, err) - + cc := require.Kubernetes(t) setupNamespace(t, cc) t.Run("no namespace given", func(t *testing.T) { @@ -417,14 +407,7 @@ func TestManageUpgradeCheckConfigMap(t *testing.T) { func TestApplyConfigMap(t *testing.T) { ctx := context.Background() - env := &envtest.Environment{} - config, err := env.Start() - assert.NilError(t, err) - t.Cleanup(func() { assert.Check(t, env.Stop()) }) - - cc, err := crclient.New(config, crclient.Options{}) - assert.NilError(t, err) - + cc := require.Kubernetes(t) setupNamespace(t, 
cc)

 	t.Run("successful create", func(t *testing.T) {
@@ -549,12 +532,35 @@ func TestGetManagedClusters(t *testing.T) {
 		}
 		ctx, calls := setupLogCapture(ctx)
 		count := getManagedClusters(ctx, fakeClientWithOptionalError)
-		assert.Equal(t, len(*calls), 1)
+		assert.Assert(t, len(*calls) > 0)
 		assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not count postgres clusters`))
 		assert.Assert(t, count == 0)
 	})
 }

+func TestGetBridgeClusters(t *testing.T) {
+	ctx := context.Background()
+
+	t.Run("success", func(t *testing.T) {
+		fakeClient := setupFakeClientWithPGOScheme(t, true)
+		ctx, calls := setupLogCapture(ctx)
+		count := getBridgeClusters(ctx, fakeClient)
+		assert.Equal(t, len(*calls), 0)
+		assert.Assert(t, count == 2)
+	})
+
+	t.Run("list throw error", func(t *testing.T) {
+		fakeClientWithOptionalError := &fakeClientWithError{
+			setupFakeClientWithPGOScheme(t, true), "list error",
+		}
+		ctx, calls := setupLogCapture(ctx)
+		count := getBridgeClusters(ctx, fakeClientWithOptionalError)
+		assert.Assert(t, len(*calls) > 0)
+		assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not count bridge clusters`))
+		assert.Assert(t, count == 0)
+	})
+}
+
 func TestGetServerVersion(t *testing.T) {
 	t.Run("success", func(t *testing.T) {
 		expect, server := setupVersionServer(t, true)
diff --git a/internal/upgradecheck/helpers_test.go b/internal/upgradecheck/helpers_test.go
index 686343672e..63184184db 100644
--- a/internal/upgradecheck/helpers_test.go
+++ b/internal/upgradecheck/helpers_test.go
@@ -1,19 +1,8 @@
-package upgradecheck
-
-/*
- Copyright 2021 - 2023 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0

- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+package upgradecheck

 import (
 	"context"
@@ -38,20 +27,23 @@ import (
 	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )

+// fakeClientWithError is a controller runtime client paired with an error type
+// used to force specific errors
 type fakeClientWithError struct {
 	crclient.Client
 	errorType string
 }

-func (f *fakeClientWithError) Get(ctx context.Context, key types.NamespacedName, obj crclient.Object) error {
+// Get returns the result of client.Get OR an error (`get error`) if fakeClientWithError is set to that error type
+func (f *fakeClientWithError) Get(ctx context.Context, key types.NamespacedName, obj crclient.Object, opts ...crclient.GetOption) error {
 	switch f.errorType {
 	case "get error":
 		return fmt.Errorf("get error")
 	default:
-		return f.Client.Get(ctx, key, obj)
+		return f.Client.Get(ctx, key, obj, opts...)
 	}
 }

+// Patch returns the result of client.Patch OR an error (`patch error`) if fakeClientWithError is set to that error type
 // TODO: PatchType is not supported currently by fake
 // - https://github.com/kubernetes/client-go/issues/970
 // Once that gets fixed, we can test without envtest
@@ -65,6 +57,7 @@ func (f *fakeClientWithError) Patch(ctx context.Context, obj crclient.Object,
 	}
 }

+// List returns the result of client.List OR an error (`list error`) if fakeClientWithError is set to that error type
 func (f *fakeClientWithError) List(ctx context.Context, objList crclient.ObjectList,
 	opts ...crclient.ListOption) error {
 	switch f.errorType {
@@ -75,18 +68,18 @@ func (f *fakeClientWithError) List(ctx context.Context, objList crclient.ObjectL
 	}
 }

+// setupDeploymentID returns a UUID
 func setupDeploymentID(t *testing.T) string {
 	t.Helper()
 	deploymentID = string(uuid.NewUUID())
 	return deploymentID
 }

+// setupFakeClientWithPGOScheme returns a fake client with the PGO scheme added;
+// if `includeCluster` is true, also adds some empty PostgresCluster and CrunchyBridgeCluster
+// items to the client
 func setupFakeClientWithPGOScheme(t *testing.T, includeCluster bool) crclient.Client {
 	t.Helper()
-	pgoScheme, err := runtime.CreatePostgresOperatorScheme()
-	if err != nil {
-		t.Fatal(err)
-	}
 	if includeCluster {
 		pc := &v1beta1.PostgresClusterList{
 			Items: []v1beta1.PostgresCluster{
@@ -102,11 +95,31 @@ func setupFakeClientWithPGOScheme(t *testing.T, includeCluster bool) crclient.Cl
 				},
 			},
 		}
-		return fake.NewClientBuilder().WithScheme(pgoScheme).WithLists(pc).Build()
+
+		bcl := &v1beta1.CrunchyBridgeClusterList{
+			Items: []v1beta1.CrunchyBridgeCluster{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "hippo",
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "elephant",
+					},
+				},
+			},
+		}
+
+		return fake.NewClientBuilder().
+			WithScheme(runtime.Scheme).
+			WithLists(pc, bcl).
+			Build()
 	}
-	return fake.NewClientBuilder().WithScheme(pgoScheme).Build()
+	return fake.NewClientBuilder().WithScheme(runtime.Scheme).Build()
 }

+// setupVersionServer sets up (and registers teardown for) a test server that serves version info
 func setupVersionServer(t *testing.T, works bool) (version.Info, *httptest.Server) {
 	t.Helper()
 	expect := version.Info{
@@ -131,6 +144,7 @@ func setupVersionServer(t *testing.T, works bool) (version.Info, *httptest.Serve
 	return expect, server
 }

+// setupLogCapture captures log messages so tests can count and inspect them
 func setupLogCapture(ctx context.Context) (context.Context, *[]string) {
 	calls := []string{}
 	testlog := funcr.NewJSON(func(object string) {
diff --git a/internal/upgradecheck/http.go b/internal/upgradecheck/http.go
index 8077b4b947..71a3c465c0 100644
--- a/internal/upgradecheck/http.go
+++ b/internal/upgradecheck/http.go
@@ -1,17 +1,6 @@
-/*
- Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2017 - 2024 Crunchy Data Solutions, Inc.
+// +// SPDX-License-Identifier: Apache-2.0 package upgradecheck @@ -22,6 +11,7 @@ import ( "net/http" "time" + "github.com/golang-jwt/jwt/v5" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" crclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -77,7 +67,7 @@ func init() { func checkForUpgrades(ctx context.Context, url, versionString string, backoff wait.Backoff, crclient crclient.Client, cfg *rest.Config, - isOpenShift bool) (message string, header string, err error) { + isOpenShift bool, registrationToken string) (message string, header string, err error) { var headerPayloadStruct *clientUpgradeData // Prep request @@ -86,7 +76,7 @@ func checkForUpgrades(ctx context.Context, url, versionString string, backoff wa // generateHeader always returns some sort of struct, using defaults/nil values // in case some of the checks return errors headerPayloadStruct = generateHeader(ctx, cfg, crclient, - versionString, isOpenShift) + versionString, isOpenShift, registrationToken) req, err = addHeader(req, headerPayloadStruct) } @@ -136,24 +126,37 @@ type CheckForUpgradesScheduler struct { Client crclient.Client Config *rest.Config - OpenShift bool - Refresh time.Duration - URL, Version string + OpenShift bool + Refresh time.Duration + RegistrationToken string + URL, Version string } // ManagedScheduler creates a [CheckForUpgradesScheduler] and adds it to m. -func ManagedScheduler(m manager.Manager, openshift bool, url, version string) error { +// NOTE(registration): This takes a token/nil parameter when the operator is started. +// Currently the operator restarts when the token is updated, +// so this token is always current; but if that restart behavior is changed, +// we will want the upgrade mechanism to instantiate its own registration runner +// or otherwise get the most recent token. +func ManagedScheduler(m manager.Manager, openshift bool, + url, version string, registrationToken *jwt.Token) error { if url == "" { url = upgradeCheckURL } + var token string + if registrationToken != nil { + token = registrationToken.Raw + } + return m.Add(&CheckForUpgradesScheduler{ - Client: m.GetClient(), - Config: m.GetConfig(), - OpenShift: openshift, - Refresh: 24 * time.Hour, - URL: url, - Version: version, + Client: m.GetClient(), + Config: m.GetConfig(), + OpenShift: openshift, + Refresh: 24 * time.Hour, + RegistrationToken: token, + URL: url, + Version: version, }) } @@ -188,7 +191,7 @@ func (s *CheckForUpgradesScheduler) check(ctx context.Context) { }() info, header, err := checkForUpgrades(ctx, - s.URL, s.Version, backoff, s.Client, s.Config, s.OpenShift) + s.URL, s.Version, backoff, s.Client, s.Config, s.OpenShift, s.RegistrationToken) if err != nil { log.V(1).Info("could not complete upgrade check", "response", err.Error()) diff --git a/internal/upgradecheck/http_test.go b/internal/upgradecheck/http_test.go index 2e6e9528b4..9535f942ea 100644 --- a/internal/upgradecheck/http_test.go +++ b/internal/upgradecheck/http_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package upgradecheck @@ -32,6 +21,7 @@ import ( "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/manager" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/testing/cmp" ) @@ -58,10 +48,16 @@ func (m *MockClient) Do(req *http.Request) (*http.Response, error) { } func TestCheckForUpgrades(t *testing.T) { - fakeClient := setupFakeClientWithPGOScheme(t, false) - ctx := logging.NewContext(context.Background(), logging.Discard()) + fakeClient := setupFakeClientWithPGOScheme(t, true) cfg := &rest.Config{} + ctx := logging.NewContext(context.Background(), logging.Discard()) + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx = feature.NewContext(ctx, gate) + // Pass *testing.T to allows the correct messages from the assert package // in the event of certain failures. checkData := func(t *testing.T, header string) { @@ -70,6 +66,10 @@ func TestCheckForUpgrades(t *testing.T) { assert.NilError(t, err) assert.Assert(t, data.DeploymentID != "") assert.Equal(t, data.PGOVersion, "4.7.3") + assert.Equal(t, data.RegistrationToken, "speakFriend") + assert.Equal(t, data.BridgeClustersTotal, 2) + assert.Equal(t, data.PGOClustersTotal, 2) + assert.Equal(t, data.FeatureGatesEnabled, "TablespaceVolumes=true") } t.Run("success", func(t *testing.T) { @@ -83,7 +83,7 @@ func TestCheckForUpgrades(t *testing.T) { } res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false) + fakeClient, cfg, false, "speakFriend") assert.NilError(t, err) assert.Equal(t, res, `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}`) checkData(t, header) @@ -98,7 +98,7 @@ func TestCheckForUpgrades(t *testing.T) { } res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false) + fakeClient, cfg, false, "speakFriend") // Two failed calls because of env var assert.Equal(t, counter, 2) assert.Equal(t, res, "") @@ -118,7 +118,7 @@ func TestCheckForUpgrades(t *testing.T) { } res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false) + fakeClient, cfg, false, "speakFriend") assert.Equal(t, res, "") // Two failed calls because of env var assert.Equal(t, counter, 2) @@ -147,7 +147,7 @@ func TestCheckForUpgrades(t *testing.T) { } res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false) + fakeClient, cfg, false, "speakFriend") assert.Equal(t, counter, 2) assert.NilError(t, err) assert.Equal(t, res, `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}`) diff --git a/internal/util/README.md b/internal/util/README.md deleted file mode 100644 index cc87fd37b7..0000000000 --- a/internal/util/README.md +++ /dev/null @@ -1,120 +0,0 @@ - - - -## Feature Gates - -Feature gates allow users to enable or disable -certain features by setting the "PGO_FEATURE_GATES" environment -variable to a list similar to "feature1=true,feature2=false,..." -in the PGO Deployment. - -This capability leverages the relevant Kubernetes packages. Documentation and -code implementation examples are given below. 
- -- Documentation: - - https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/ - -- Package Information: - - https://pkg.go.dev/k8s.io/component-base@v0.20.1/featuregate - -- Adding the feature gate key: - - https://releases.k8s.io/v1.20.0/pkg/features/kube_features.go#L27 - -- Adding the feature gate to the known features map: - - https://releases.k8s.io/v1.20.0/pkg/features/kube_features.go#L729-732 - -- Adding features to the featureGate - - https://releases.k8s.io/v1.20.0/staging/src/k8s.io/component-base/featuregate/feature_gate.go#L110-L111 - -- Setting the feature gates - - https://releases.k8s.io/v1.20.0/staging/src/k8s.io/component-base/featuregate/feature_gate.go#L105-L107 - -## Developing with Feature Gates in PGO - -To add a new feature gate, a few steps are required. First, in -`internal/util/features.go`, you will add a feature gate key name. As an example, -for a new feature called 'FeatureName', you would add a new constant and comment -describing what the feature gate controls at the top of the file, similar to -``` -// Enables FeatureName in PGO -FeatureName featuregate.Feature = "FeatureName" -``` - -Next, add a new entry to the `pgoFeatures` map -``` -var pgoFeatures = map[featuregate.Feature]featuregate.FeatureSpec{ - FeatureName: {Default: false, PreRelease: featuregate.Alpha}, -} -``` -where `FeatureName` is the constant defined previously, `Default: false` sets the -default behavior and `PreRelease: featuregate.Alpha`. The possible `PreRelease` -values are `Alpha`, `Beta`, `GA` and `Deprecated`. - -- https://pkg.go.dev/k8s.io/component-base@v0.20.1/featuregate#pkg-constants - -By Kubernetes convention, `Alpha` features have almost always been disabled by -default. `Beta` features are generally enabled by default. - -- https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-stages - -Prior to Kubernetes 1.24, both `Beta` features and APIs were enabled by default. -Starting in v1.24, new `Beta` APIs are generally disabled by default, while `Beta` -features remain enabled by default. - -- https://kubernetes.io/blog/2021/07/14/upcoming-changes-in-kubernetes-1-22/#kubernetes-api-removals -- https://kubernetes.io/blog/2022/05/03/kubernetes-1-24-release-announcement/#beta-apis-off-by-default -- https://github.com/kubernetes/enhancements/tree/master/keps/sig-architecture/3136-beta-apis-off-by-default#goals - -For consistency with Kubernetes, we recommend that feature-gated features be -configured as `Alpha` and disabled by default. Any `Beta` features added should -stay consistent with Kubernetes practice and be enabled by default, but we should -keep an eye out for changes to these standards and adjust as needed. - -Once the above items are set, you can then use your feature gated value in the -code base to control feature behavior using something like -``` -if util.DefaultMutableFeatureGate.Enabled(util.FeatureName) -``` - -To test the feature gate, set the `PGO_FEATURE_GATES` environment variable to -enable the new feature as follows -``` -PGO_FEATURE_GATES="FeatureName=true" -``` -Note that for more than one feature, this variable accepts a comma delimited -list, e.g. -``` -PGO_FEATURE_GATES="FeatureName=true,FeatureName2=true,FeatureName3=true" -``` - -While `PGO_FEATURE_GATES` does not have to be set, please note that the features -must be defined before use, otherwise PGO deployment will fail with the -following message -`panic: unable to parse and store configured feature gates. 
unrecognized feature gate` - -Also, the features must have boolean values, otherwise you will see -`panic: unable to parse and store configured feature gates. invalid value` - -When dealing with tests that do not invoke `cmd/postgres-operator/main.go`, keep -in mind that you will need to ensure that you invoke the `AddAndSetFeatureGates` -function. Otherwise, any test that references the undefined feature gate will fail -with a panic message similar to -"feature "FeatureName" is not registered in FeatureGate" - -To correct for this, you simply need a line similar to -``` -err := util.AddAndSetFeatureGates("") -``` diff --git a/internal/util/features.go b/internal/util/features.go deleted file mode 100644 index 3cc363c649..0000000000 --- a/internal/util/features.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - Copyright 2017 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package util - -import ( - "fmt" - - "k8s.io/component-base/featuregate" -) - -const ( - // Every feature gate should add a key here following this template: - // - // // Enables FeatureName... - // FeatureName featuregate.Feature = "FeatureName" - // - // - https://releases.k8s.io/v1.20.0/pkg/features/kube_features.go#L27 - // - // Feature gates should be listed in alphabetical, case-sensitive - // (upper before any lower case character) order. - // - // Enables support of appending custom queries to default PGMonitor queries - AppendCustomQueries featuregate.Feature = "AppendCustomQueries" - // - BridgeIdentifiers featuregate.Feature = "BridgeIdentifiers" - // - // Enables support of custom sidecars for PostgreSQL instance Pods - InstanceSidecars featuregate.Feature = "InstanceSidecars" - // - // Enables support of custom sidecars for pgBouncer Pods - PGBouncerSidecars featuregate.Feature = "PGBouncerSidecars" - // - // Enables support of tablespace volumes - TablespaceVolumes featuregate.Feature = "TablespaceVolumes" -) - -// pgoFeatures consists of all known PGO feature keys. -// To add a new feature, define a key for it above and add it here. -// An example entry is as follows: -// -// FeatureName: {Default: false, PreRelease: featuregate.Alpha}, -// -// - https://releases.k8s.io/v1.20.0/pkg/features/kube_features.go#L729-732 -var pgoFeatures = map[featuregate.Feature]featuregate.FeatureSpec{ - AppendCustomQueries: {Default: false, PreRelease: featuregate.Alpha}, - BridgeIdentifiers: {Default: false, PreRelease: featuregate.Alpha}, - InstanceSidecars: {Default: false, PreRelease: featuregate.Alpha}, - PGBouncerSidecars: {Default: false, PreRelease: featuregate.Alpha}, - TablespaceVolumes: {Default: false, PreRelease: featuregate.Alpha}, -} - -// DefaultMutableFeatureGate is a mutable, shared global FeatureGate. -// It is used to indicate whether a given feature is enabled or not. 
-// -// - https://pkg.go.dev/k8s.io/apiserver/pkg/util/feature -// - https://releases.k8s.io/v1.20.0/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go#L24-L28 -var DefaultMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate() - -// AddAndSetFeatureGates utilizes the Kubernetes feature gate packages to first -// add the default PGO features to the featureGate and then set the values provided -// via the 'PGO_FEATURE_GATES' environment variable. This function expects a string -// like feature1=true,feature2=false,... -// -// - https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/ -// - https://pkg.go.dev/k8s.io/component-base@v0.20.1/featuregate -func AddAndSetFeatureGates(features string) error { - // Add PGO features to the featureGate - // - https://releases.k8s.io/v1.20.0/staging/src/k8s.io/component-base/featuregate/feature_gate.go#L110-L111 - if err := DefaultMutableFeatureGate.Add(pgoFeatures); err != nil { - return fmt.Errorf("unable to add PGO features to the featureGate. %w", err) - } - - // Set the feature gates from environment variable config - // - https://releases.k8s.io/v1.20.0/staging/src/k8s.io/component-base/featuregate/feature_gate.go#L105-L107 - if err := DefaultMutableFeatureGate.Set(features); err != nil { - return fmt.Errorf("unable to parse and store configured feature gates. %w", err) - } - return nil -} diff --git a/internal/util/features_test.go b/internal/util/features_test.go deleted file mode 100644 index 1c93c5206b..0000000000 --- a/internal/util/features_test.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - Copyright 2017 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package util - -import ( - "testing" - - "gotest.tools/v3/assert" - "k8s.io/component-base/featuregate" -) - -func TestAddAndSetFeatureGates(t *testing.T) { - - // set test features - const TestGate1 featuregate.Feature = "TestGate1" - const TestGate2 featuregate.Feature = "TestGate2" - const TestGate3 featuregate.Feature = "TestGate3" - - pgoFeatures = map[featuregate.Feature]featuregate.FeatureSpec{ - TestGate1: {Default: false, PreRelease: featuregate.Beta}, - TestGate2: {Default: false, PreRelease: featuregate.Beta}, - TestGate3: {Default: false, PreRelease: featuregate.Beta}, - } - - t.Run("No feature gates set", func(t *testing.T) { - err := AddAndSetFeatureGates("") - assert.NilError(t, err) - }) - - t.Run("One feature gate set", func(t *testing.T) { - err := AddAndSetFeatureGates("TestGate1=true") - assert.NilError(t, err) - }) - - t.Run("Two feature gates set", func(t *testing.T) { - err := AddAndSetFeatureGates("TestGate1=true,TestGate3=true") - assert.NilError(t, err) - }) - - t.Run("All available feature gates set", func(t *testing.T) { - err := AddAndSetFeatureGates("TestGate1=true,TestGate2=true,TestGate3=true") - assert.NilError(t, err) - }) - - t.Run("One unrecognized gate set", func(t *testing.T) { - err := AddAndSetFeatureGates("NotAGate=true") - assert.ErrorContains(t, err, "unrecognized feature gate: NotAGate") - }) - - t.Run("One recognized gate, one unrecognized gate", func(t *testing.T) { - err := AddAndSetFeatureGates("TestGate1=true,NotAGate=true") - assert.ErrorContains(t, err, "unrecognized feature gate: NotAGate") - }) - - t.Run("Gate value not set", func(t *testing.T) { - err := AddAndSetFeatureGates("GateNotSet") - assert.ErrorContains(t, err, "missing bool value for GateNotSet") - }) - - t.Run("Gate value not boolean", func(t *testing.T) { - err := AddAndSetFeatureGates("GateNotSet=foo") - assert.ErrorContains(t, err, "invalid value of GateNotSet=foo, err: strconv.ParseBool") - }) -} diff --git a/internal/util/registration.go b/internal/util/registration.go deleted file mode 100644 index 494f3b64d4..0000000000 --- a/internal/util/registration.go +++ /dev/null @@ -1,105 +0,0 @@ -package util - -import ( - "crypto/rsa" - "encoding/json" - "errors" - "os" - "strings" - - "github.com/go-logr/logr" - "github.com/golang-jwt/jwt/v5" - - "github.com/crunchydata/postgres-operator/internal/config" -) - -// Registration is required only for OLM installations of the operator. -type Registration struct { - // Registration token status. - Authenticated bool `json:"authenticated"` - TokenFileFound bool `json:"tokenFileFound"` - - // Token claims. - Aud string `json:"aud"` - Exp int64 `json:"exp"` - Iat int64 `json:"iat"` - Iss string `json:"iss"` - Nbf int64 `json:"nbf"` - Sub string `json:"sub"` -} - -func parseRSAPublicKey(rawKey string) (*rsa.PublicKey, error) { - var rsaPublicKey *rsa.PublicKey - rsaPublicKey, err := jwt.ParseRSAPublicKeyFromPEM([]byte(rawKey)) - return rsaPublicKey, err -} - -func getToken(tokenPath string) (string, error) { - if _, err := os.Stat(tokenPath); err != nil { - return "", err - } - - bs, err := os.ReadFile(tokenPath) - if err != nil { - return "", err - } - - token := string(bs) - if token == "" { - return "", errors.New("token cannot be empty") - } - - return token, nil -} - -// GetRegistration returns an empty struct if registration is not required. 
-func GetRegistration(rawKey string, tokenPath string, log logr.Logger) Registration { - registration := Registration{} - - if !config.RegistrationRequired() { - return registration - } - - // If the key is invalid, registration cannot be enforced. - key, err := parseRSAPublicKey(rawKey) - if err != nil { - log.Error(err, "Error parsing RSA key") - return registration - } - - // If there is no token, an operator installation cannot be registered. - token, err := getToken(tokenPath) - if err != nil { - log.Error(err, "Error getting token: "+tokenPath) - return registration - } - - // Acknowledge that a token was provided, even if it isn't valid. - registration.TokenFileFound = true - - // Decode the token signature. - parts := strings.Split(token, ".") - sig, _ := jwt.NewParser().DecodeSegment(parts[2]) - - // Claims consist of header and payload. - claims := strings.Join(parts[0:2], ".") - - // Verify the token. - method := jwt.GetSigningMethod("RS256") - err = method.Verify(claims, sig, key) - if err == nil { - log.Info("token authentication succeeded") - registration.Authenticated = true - } else { - log.Error(err, "token authentication failed") - } - - // Populate Registration with token payload. - payloadStr, _ := jwt.NewParser().DecodeSegment(parts[1]) - err = json.Unmarshal(payloadStr, ®istration) - if err != nil { - log.Error(err, "token error") - } - - return registration -} diff --git a/internal/util/secrets.go b/internal/util/secrets.go index 5e86524717..82768c9386 100644 --- a/internal/util/secrets.go +++ b/internal/util/secrets.go @@ -1,17 +1,6 @@ -/* - Copyright 2017 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package util diff --git a/internal/util/secrets_test.go b/internal/util/secrets_test.go index b237636e49..5d549ca89e 100644 --- a/internal/util/secrets_test.go +++ b/internal/util/secrets_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package util @@ -65,7 +54,7 @@ func TestGenerateAlphaNumericPassword(t *testing.T) { assert.Assert(t, cmp.Regexp(`^[A-Za-z0-9]*$`, password)) } - previous := sets.String{} + previous := sets.Set[string]{} for i := 0; i < 10; i++ { password, err := GenerateAlphaNumericPassword(5) @@ -90,7 +79,7 @@ func TestGenerateASCIIPassword(t *testing.T) { } } - previous := sets.String{} + previous := sets.Set[string]{} for i := 0; i < 10; i++ { password, err := GenerateASCIIPassword(5) diff --git a/internal/util/util.go b/internal/util/util.go index 03eeafb547..72634ebbc6 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -1,19 +1,8 @@ -package util - -/* - Copyright 2017 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +package util import ( "strings" @@ -23,7 +12,7 @@ import ( // be used as part of an SQL statement. // // Any double quotes in name will be escaped. The quoted identifier will be -// case sensitive when used in a query. If the input string contains a zero +// case-sensitive when used in a query. If the input string contains a zero // byte, the result will be truncated immediately before it. // // Implementation borrowed from lib/pq: https://github.com/lib/pq which is diff --git a/licenses/LICENSE.txt b/licenses/LICENSE.txt index e6aaa01f9a..e799dc3209 100644 --- a/licenses/LICENSE.txt +++ b/licenses/LICENSE.txt @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2017 - 2023 Crunchy Data Solutions, Inc. + Copyright 2017 - 2024 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go new file mode 100644 index 0000000000..0b94a4dae1 --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go @@ -0,0 +1,239 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CrunchyBridgeClusterSpec defines the desired state of CrunchyBridgeCluster +// to be managed by Crunchy Data Bridge +type CrunchyBridgeClusterSpec struct { + // +optional + Metadata *Metadata `json:"metadata,omitempty"` + + // Whether the cluster is high availability, + // meaning that it has a secondary it can fail over to quickly + // in case the primary becomes unavailable. + // +kubebuilder:validation:Required + IsHA bool `json:"isHa"` + + // Whether the cluster is protected. 
Protected clusters can't be destroyed until + // their protected flag is removed + // +kubebuilder:validation:Optional + IsProtected bool `json:"isProtected,omitempty"` + + // The name of the cluster + // --- + // According to Bridge API/GUI errors, + // "Field name should be between 5 and 50 characters in length, containing only unicode characters, unicode numbers, hyphens, spaces, or underscores, and starting with a character", and ending with a character or number. + // +kubebuilder:validation:MinLength=5 + // +kubebuilder:validation:MaxLength=50 + // +kubebuilder:validation:Pattern=`^[A-Za-z][A-Za-z0-9\-_ ]*[A-Za-z0-9]$` + // +kubebuilder:validation:Required + // +kubebuilder:validation:Type=string + ClusterName string `json:"clusterName"` + + // The ID of the cluster's plan. Determines instance, CPU, and memory. + // +kubebuilder:validation:Required + Plan string `json:"plan"` + + // The ID of the cluster's major Postgres version. + // Currently Bridge offers 13-17 + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=13 + // +kubebuilder:validation:Maximum=17 + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 + PostgresVersion int `json:"majorVersion"` + + // The cloud provider where the cluster is located. + // Currently Bridge offers aws, azure, and gcp only + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum={aws,azure,gcp} + // +kubebuilder:validation:XValidation:rule=`self == oldSelf`,message="immutable" + Provider string `json:"provider"` + + // The provider region where the cluster is located. + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule=`self == oldSelf`,message="immutable" + Region string `json:"region"` + + // Roles for which to create Secrets that contain their credentials which + // are retrieved from the Bridge API. An empty list creates no role secrets. + // Removing a role from this list does NOT drop the role nor revoke their + // access, but it will delete that role's secret from the kube cluster. + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + Roles []*CrunchyBridgeClusterRoleSpec `json:"roles,omitempty"` + + // The name of the secret containing the API key and team id + // +kubebuilder:validation:Required + Secret string `json:"secret"` + + // The amount of storage available to the cluster in gigabytes. + // The amount must be an integer, followed by Gi (gibibytes) or G (gigabytes) to match Kubernetes conventions. + // If the amount is given in Gi, we round to the nearest G value. + // The minimum value allowed by Bridge is 10 GB. + // The maximum value allowed by Bridge is 65535 GB. + // +kubebuilder:validation:Required + Storage resource.Quantity `json:"storage"` +} + +type CrunchyBridgeClusterRoleSpec struct { + // Name of the role within Crunchy Bridge. + // More info: https://docs.crunchybridge.com/concepts/users + // +kubebuilder:validation:Required + Name string `json:"name"` + + // The name of the Secret that will hold the role credentials. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Type=string + SecretName string `json:"secretName"` +} + +// CrunchyBridgeClusterStatus defines the observed state of CrunchyBridgeCluster +type CrunchyBridgeClusterStatus struct { + // The name of the cluster in Bridge. 
+ // +optional + ClusterName string `json:"name,omitempty"` + + // conditions represent the observations of postgres cluster's current state. + // +optional + // +listType=map + // +listMapKey=type + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // The Hostname of the postgres cluster in Bridge, provided by Bridge API and null until then. + // +optional + Host string `json:"host,omitempty"` + + // The ID of the postgres cluster in Bridge, provided by Bridge API and null until then. + // +optional + ID string `json:"id,omitempty"` + + // Whether the cluster is high availability, meaning that it has a secondary it can fail + // over to quickly in case the primary becomes unavailable. + // +optional + IsHA *bool `json:"isHa"` + + // Whether the cluster is protected. Protected clusters can't be destroyed until + // their protected flag is removed + // +optional + IsProtected *bool `json:"isProtected"` + + // The cluster's major Postgres version. + // +optional + MajorVersion int `json:"majorVersion"` + + // observedGeneration represents the .metadata.generation on which the status was based. + // +optional + // +kubebuilder:validation:Minimum=0 + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // The cluster upgrade as represented by Bridge + // +optional + OngoingUpgrade []*UpgradeOperation `json:"ongoingUpgrade,omitempty"` + + // The ID of the cluster's plan. Determines instance, CPU, and memory. + // +optional + Plan string `json:"plan"` + + // Most recent, raw responses from Bridge API + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + // +kubebuilder:validation:Type=object + Responses APIResponses `json:"responses"` + + // State of cluster in Bridge. + // +optional + State string `json:"state,omitempty"` + + // The amount of storage available to the cluster. + // +optional + Storage *resource.Quantity `json:"storage"` +} + +type APIResponses struct { + Cluster SchemalessObject `json:"cluster,omitempty"` + Status SchemalessObject `json:"status,omitempty"` + Upgrade SchemalessObject `json:"upgrade,omitempty"` +} + +type ClusterUpgrade struct { + Operations []*UpgradeOperation `json:"operations,omitempty"` +} + +type UpgradeOperation struct { + Flavor string `json:"flavor"` + StartingFrom string `json:"starting_from"` + State string `json:"state"` +} + +// TODO(crunchybridgecluster) Think through conditions +// CrunchyBridgeClusterStatus condition types. +const ( + ConditionUnknown = "" + ConditionUpgrading = "Upgrading" + ConditionReady = "Ready" + ConditionDeleting = "Deleting" +) + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +operator-sdk:csv:customresourcedefinitions:resources={{ConfigMap,v1},{Secret,v1},{Service,v1},{CronJob,v1beta1},{Deployment,v1},{Job,v1},{StatefulSet,v1},{PersistentVolumeClaim,v1}} + +// CrunchyBridgeCluster is the Schema for the crunchybridgeclusters API +type CrunchyBridgeCluster struct { + // ObjectMeta.Name is a DNS subdomain. 
+ // - https://docs.k8s.io/concepts/overview/working-with-objects/names/#dns-subdomain-names + // - https://releases.k8s.io/v1.21.0/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/validator.go#L60 + + // In Bridge json, meta.name is "name" + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // NOTE(cbandy): Every CrunchyBridgeCluster needs a Spec, but it is optional here + // so ObjectMeta can be managed independently. + + Spec CrunchyBridgeClusterSpec `json:"spec,omitempty"` + Status CrunchyBridgeClusterStatus `json:"status,omitempty"` +} + +// Default implements "sigs.k8s.io/controller-runtime/pkg/webhook.Defaulter" so +// a webhook can be registered for the type. +// - https://book.kubebuilder.io/reference/webhook-overview.html +func (c *CrunchyBridgeCluster) Default() { + if len(c.APIVersion) == 0 { + c.APIVersion = GroupVersion.String() + } + if len(c.Kind) == 0 { + c.Kind = "CrunchyBridgeCluster" + } +} + +// +kubebuilder:object:root=true + +// CrunchyBridgeClusterList contains a list of CrunchyBridgeCluster +type CrunchyBridgeClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CrunchyBridgeCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CrunchyBridgeCluster{}, &CrunchyBridgeClusterList{}) +} + +func NewCrunchyBridgeCluster() *CrunchyBridgeCluster { + cluster := &CrunchyBridgeCluster{} + cluster.SetGroupVersionKind(GroupVersion.WithKind("CrunchyBridgeCluster")) + return cluster +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go index c7b9a5fe00..15773a1815 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package v1beta1 contains API Schema definitions for the postgres-operator v1beta1 API group // +kubebuilder:object:generate=true diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go index 5abdc6fd95..2f01399372 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -19,7 +8,7 @@ type PatroniSpec struct { // Patroni dynamic configuration settings. Changes to this value will be // automatically reloaded without validation. Changes to certain PostgreSQL // parameters cause PostgreSQL to restart. - // More info: https://patroni.readthedocs.io/en/latest/SETTINGS.html + // More info: https://patroni.readthedocs.io/en/latest/dynamic_configuration.html // +optional // +kubebuilder:pruning:PreserveUnknownFields // +kubebuilder:validation:Schemaless diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go index dff484e9e6..06c7321bc4 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go index 95edadde95..3e3098a602 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -60,15 +49,15 @@ type PGBackRestJobStatus struct { type PGBackRestScheduledBackupStatus struct { // The name of the associated pgBackRest scheduled backup CronJob - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional CronJobName string `json:"cronJobName,omitempty"` // The name of the associated pgBackRest repository - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional RepoName string `json:"repo,omitempty"` // The pgBackRest backup type for this Job - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional Type string `json:"type,omitempty"` // Represents the time the manual backup Job was acknowledged by the Job controller. @@ -311,7 +300,7 @@ type PGBackRestRepo struct { // will be applicable once implemented: // https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/1027-api-unions - // The name of the the repository + // The name of the repository // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=^repo[1-4] Name string `json:"name"` @@ -353,7 +342,20 @@ type RepoHostStatus struct { type RepoPVC struct { // Defines a PersistentVolumeClaim spec used to create and/or bind a volume + // --- // +kubebuilder:validation:Required + // + // NOTE(validation): Every PVC must have at least one accessMode. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.accessModes`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2098-L2100 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2292-L2294 + // +kubebuilder:validation:XValidation:rule=`has(self.accessModes) && size(self.accessModes) > 0`,message=`missing accessModes` + // + // NOTE(validation): Every PVC must have a positive storage request. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.resources.requests.storage`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2126-L2133 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2318-L2325 + // +kubebuilder:validation:XValidation:rule=`has(self.resources) && has(self.resources.requests) && has(self.resources.requests.storage)`,message=`missing storage request` VolumeClaimSpec corev1.PersistentVolumeClaimSpec `json:"volumeClaimSpec"` } @@ -414,7 +416,7 @@ type RepoStatus struct { ReplicaCreateBackupComplete bool `json:"replicaCreateBackupComplete,omitempty"` // A hash of the required fields in the spec for defining an Azure, GCS or S3 repository, - // Utilizd to detect changes to these fields and then execute pgBackRest stanza-create + // Utilized to detect changes to these fields and then execute pgBackRest stanza-create // commands accordingly. // +optional RepoOptionsHash string `json:"repoOptionsHash,omitempty"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go index af1b8cca96..e940a9300d 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go index 9e49d281cb..f2cd78335a 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go index 21cdab5993..8e99f8239f 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go @@ -1,16 +1,6 @@ -// Copyright 2021 - 2023 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -58,8 +48,8 @@ type PGUpgradeSpec struct { // The major version of PostgreSQL before the upgrade. // +kubebuilder:validation:Required - // +kubebuilder:validation:Minimum=10 - // +kubebuilder:validation:Maximum=16 + // +kubebuilder:validation:Minimum=11 + // +kubebuilder:validation:Maximum=17 FromPostgresVersion int `json:"fromPostgresVersion"` // TODO(benjaminjb): define webhook validation to make sure @@ -69,8 +59,8 @@ type PGUpgradeSpec struct { // The major version of PostgreSQL to be upgraded to. // +kubebuilder:validation:Required - // +kubebuilder:validation:Minimum=10 - // +kubebuilder:validation:Maximum=16 + // +kubebuilder:validation:Minimum=11 + // +kubebuilder:validation:Maximum=17 ToPostgresVersion int `json:"toPostgresVersion"` // The image name to use for PostgreSQL containers after upgrade. 
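// The raised bounds above track the PostgreSQL major versions the operator
// currently supports (11 through 17). The remaining TODO in this file asks for
// webhook validation across the two version fields; a minimal sketch of one
// plausible cross-field check, using hypothetical names that are not part of
// this patch:
//
//	func validateUpgradeDirection(spec v1beta1.PGUpgradeSpec) error {
//	    if spec.FromPostgresVersion >= spec.ToPostgresVersion {
//	        return fmt.Errorf("fromPostgresVersion (%d) must be less than toPostgresVersion (%d)",
//	            spec.FromPostgresVersion, spec.ToPostgresVersion)
//	    }
//	    return nil
//	}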
diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go index 9f5018e919..b7baa72942 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -60,7 +49,10 @@ type PostgresUserSpec struct { // ALTER ROLE options except for PASSWORD. This field is ignored for the // "postgres" user. // More info: https://www.postgresql.org/docs/current/role-attributes.html + // +kubebuilder:validation:MaxLength=200 // +kubebuilder:validation:Pattern=`^[^;]*$` + // +kubebuilder:validation:XValidation:rule=`!self.matches("(?i:PASSWORD)")`,message="cannot assign password" + // +kubebuilder:validation:XValidation:rule=`!self.matches("(?:--|/[*]|[*]/)")`,message="cannot contain comments" // +optional Options string `json:"options,omitempty"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go index 26431d13d5..83396902d0 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index 318ee097da..54e42baa3b 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -33,8 +22,8 @@ type PostgresClusterSpec struct { DataSource *DataSource `json:"dataSource,omitempty"` // PostgreSQL backup configuration - // +kubebuilder:validation:Required - Backups Backups `json:"backups"` + // +optional + Backups Backups `json:"backups,omitempty"` // The secret containing the Certificates and Keys to encrypt PostgreSQL // traffic will need to contain the server TLS certificate, TLS key and the @@ -122,8 +111,8 @@ type PostgresClusterSpec struct { // The major version of PostgreSQL installed in the PostgreSQL image // +kubebuilder:validation:Required - // +kubebuilder:validation:Minimum=10 - // +kubebuilder:validation:Maximum=16 + // +kubebuilder:validation:Minimum=11 + // +kubebuilder:validation:Maximum=17 // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 PostgresVersion int `json:"postgresVersion"` @@ -148,6 +137,10 @@ type PostgresClusterSpec struct { // +optional Service *ServiceSpec `json:"service,omitempty"` + // Specification of the service that exposes PostgreSQL replica instances + // +optional + ReplicaService *ServiceSpec `json:"replicaService,omitempty"` + // Whether or not the PostgreSQL cluster should be stopped. // When this is true, workloads are scaled to zero and CronJobs // are suspended. @@ -162,7 +155,17 @@ type PostgresClusterSpec struct { // A list of group IDs applied to the process of a container. These can be // useful when accessing shared file systems with constrained permissions. // More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context - // +optional + // --- + // +kubebuilder:validation:Optional + // + // Containers should not run with a root GID. + // - https://kubernetes.io/docs/concepts/security/pod-security-standards/ + // +kubebuilder:validation:items:Minimum=1 + // + // Supplementary GIDs must fit within int32. + // - https://releases.k8s.io/v1.18.0/pkg/apis/core/validation/validation.go#L3659-L3663 + // - https://releases.k8s.io/v1.22.0/pkg/apis/core/validation/validation.go#L3923-L3927 + // +kubebuilder:validation:items:Maximum=2147483647 SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` // Users to create inside PostgreSQL and the databases they should access. @@ -171,6 +174,7 @@ type PostgresClusterSpec struct { // from this list does NOT drop the user nor revoke their access. // +listType=map // +listMapKey=name + // +kubebuilder:validation:MaxItems=64 // +optional Users []PostgresUserSpec `json:"users,omitempty"` @@ -180,7 +184,7 @@ type PostgresClusterSpec struct { // DataSource defines data sources for a new PostgresCluster. type DataSource struct { // Defines a pgBackRest cloud-based data source that can be used to pre-populate the - // the PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. + // PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. 
// The PGBackRest field is incompatible with the PostgresCluster field: only one // data source can be used for pre-populating a new PostgreSQL cluster // +optional @@ -217,7 +221,7 @@ type DataSourceVolumes struct { PGBackRestVolume *DataSourceVolume `json:"pgBackRestVolume,omitempty"` } -// DataSourceVolume defines the PVC name and data diretory path for an existing cluster volume. +// DataSourceVolume defines the PVC name and data directory path for an existing cluster volume. type DataSourceVolume struct { // The existing PVC name. PVCName string `json:"pvcName"` @@ -317,8 +321,12 @@ func (s *PostgresClusterSpec) Default() { type Backups struct { // pgBackRest archive configuration - // +kubebuilder:validation:Required + // +optional PGBackRest PGBackRestArchive `json:"pgbackrest"` + + // VolumeSnapshot configuration + // +optional + Snapshots *VolumeSnapshots `json:"snapshots,omitempty"` } // PostgresClusterStatus defines the observed state of PostgresCluster @@ -340,11 +348,9 @@ type PostgresClusterStatus struct { // +optional PGBackRest *PGBackRestStatus `json:"pgbackrest,omitempty"` - // Version information for installations with a registration requirement. // +optional RegistrationRequired *RegistrationRequirementStatus `json:"registrationRequired,omitempty"` - // Signals the need for a token to be applied when registration is required. // +optional TokenRequired string `json:"tokenRequired,omitempty"` @@ -401,8 +407,7 @@ const ( PersistentVolumeResizing = "PersistentVolumeResizing" PostgresClusterProgressing = "Progressing" ProxyAvailable = "ProxyAvailable" - RegistrationRequired = "RegistrationRequired" - TokenRequired = "TokenRequired" + Registered = "Registered" ) type PostgresInstanceSetSpec struct { @@ -445,7 +450,20 @@ type PostgresInstanceSetSpec struct { // Defines a PersistentVolumeClaim for PostgreSQL data. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + // --- // +kubebuilder:validation:Required + // + // NOTE(validation): Every PVC must have at least one accessMode. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.accessModes`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2098-L2100 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2292-L2294 + // +kubebuilder:validation:XValidation:rule=`has(self.accessModes) && size(self.accessModes) > 0`,message=`missing accessModes` + // + // NOTE(validation): Every PVC must have a positive storage request. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.resources.requests.storage`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2126-L2133 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2318-L2325 + // +kubebuilder:validation:XValidation:rule=`has(self.resources) && has(self.resources.requests) && has(self.resources.requests.storage)`,message=`missing storage request` DataVolumeClaimSpec corev1.PersistentVolumeClaimSpec `json:"dataVolumeClaimSpec"` // Priority class name for the PostgreSQL pod. Changing this value causes @@ -486,7 +504,20 @@ type PostgresInstanceSetSpec struct { // Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. // More info: https://www.postgresql.org/docs/current/wal.html - // +optional + // --- + // +kubebuilder:validation:Optional + // + // NOTE(validation): Every PVC must have at least one accessMode. 
NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.accessModes`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2098-L2100 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2292-L2294 + // +kubebuilder:validation:XValidation:rule=`has(self.accessModes) && size(self.accessModes) > 0`,message=`missing accessModes` + // + // NOTE(validation): Every PVC must have a positive storage request. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.resources.requests.storage`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2126-L2133 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2318-L2325 + // +kubebuilder:validation:XValidation:rule=`has(self.resources) && has(self.resources.requests) && has(self.resources.requests.storage)`,message=`missing storage request` WALVolumeClaimSpec *corev1.PersistentVolumeClaimSpec `json:"walVolumeClaimSpec,omitempty"` // The list of tablespaces volumes to mount for this postgrescluster @@ -515,7 +546,20 @@ type TablespaceVolume struct { // Defines a PersistentVolumeClaim for a tablespace. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + // --- // +kubebuilder:validation:Required + // + // NOTE(validation): Every PVC must have at least one accessMode. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.accessModes`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2098-L2100 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2292-L2294 + // +kubebuilder:validation:XValidation:rule=`has(self.accessModes) && size(self.accessModes) > 0`,message=`missing accessModes` + // + // NOTE(validation): Every PVC must have a positive storage request. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.resources.requests.storage`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2126-L2133 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2318-L2325 + // +kubebuilder:validation:XValidation:rule=`has(self.resources) && has(self.resources.requests) && has(self.resources.requests.storage)`,message=`missing storage request` DataVolumeClaimSpec corev1.PersistentVolumeClaimSpec `json:"dataVolumeClaimSpec"` } @@ -552,6 +596,10 @@ type PostgresInstanceSetStatus struct { // Total number of pods that have the desired specification. // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` + + // Desired Size of the pgData volume + // +optional + DesiredPGDataVolume map[string]string `json:"desiredPGDataVolume,omitempty"` } // PostgresProxySpec is a union of the supported PostgreSQL proxies. 
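// For reference, a claim spec that passes both CEL rules added above (at
// least one access mode, plus a storage request); a minimal sketch, not part
// of this patch:
//
//	var claim corev1.PersistentVolumeClaimSpec
//	claim.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}
//	claim.Resources.Requests = corev1.ResourceList{
//	    corev1.ResourceStorage: resource.MustParse("1Gi"),
//	}
//
// Omitting either field now fails admission with `missing accessModes` or
// `missing storage request` at the CRD layer, rather than being rejected
// later by the kube-apiserver's own PersistentVolumeClaim validation.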
@@ -690,3 +738,11 @@ func NewPostgresCluster() *PostgresCluster { cluster.SetGroupVersionKind(GroupVersion.WithKind("PostgresCluster")) return cluster } + +// VolumeSnapshots defines the configuration for VolumeSnapshots +type VolumeSnapshots struct { + // Name of the VolumeSnapshotClass that should be used by VolumeSnapshots + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + VolumeSnapshotClassName string `json:"volumeSnapshotClassName"` +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go index a8e63e9c25..1dc4e3627e 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -55,6 +44,18 @@ type ServiceSpec struct { // +kubebuilder:default=ClusterIP // +kubebuilder:validation:Enum={ClusterIP,NodePort,LoadBalancer} Type string `json:"type"` + + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies + // + // +optional + // +kubebuilder:validation:Enum={Cluster,Local} + InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty"` + + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies + // + // +optional + // +kubebuilder:validation:Enum={Cluster,Local} + ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty"` } // Sidecar defines the configuration of a sidecar container diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go index 82f76e6f7a..96cd4da073 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2022 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2022 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go index d88815d19d..4fbc90a3b9 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go @@ -1,16 +1,6 @@ -// Copyright 2023 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -23,8 +13,22 @@ import ( type StandalonePGAdminConfiguration struct { // Files allows the user to mount projected volumes into the pgAdmin // container so that files can be referenced by pgAdmin as needed. + // +optional Files []corev1.VolumeProjection `json:"files,omitempty"` + // A Secret containing the value for the CONFIG_DATABASE_URI setting. + // More info: https://www.pgadmin.org/docs/pgadmin4/latest/external_database.html + // +optional + ConfigDatabaseURI *corev1.SecretKeySelector `json:"configDatabaseURI,omitempty"` + + // Settings for the gunicorn server. + // More info: https://docs.gunicorn.org/en/latest/settings.html + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + // +kubebuilder:validation:Type=object + Gunicorn SchemalessObject `json:"gunicorn,omitempty"` + // A Secret containing the value for the LDAP_BIND_PASSWORD setting. // More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html // +optional @@ -101,30 +105,76 @@ type PGAdminSpec struct { // added manually. // +optional ServerGroups []ServerGroup `json:"serverGroups"` + + // pgAdmin users that are managed via the PGAdmin spec. Users can still + // be added via the pgAdmin GUI, but those users will not show up here. + // +listType=map + // +listMapKey=username + // +optional + Users []PGAdminUser `json:"users,omitempty"` + + // ServiceName will be used as the name of a ClusterIP service pointing + // to the pgAdmin pod and port. If the service already exists, PGO will + // update the service. For more information about services reference + // the Kubernetes and CrunchyData documentation. + // https://kubernetes.io/docs/concepts/services-networking/service/ + // +optional + ServiceName string `json:"serviceName,omitempty"` } +// +kubebuilder:validation:XValidation:rule=`[has(self.postgresClusterName),has(self.postgresClusterSelector)].exists_one(x,x)`,message=`exactly one of "postgresClusterName" or "postgresClusterSelector" is required` type ServerGroup struct { // The name for the ServerGroup in pgAdmin. // Must be unique in the pgAdmin's ServerGroups since it becomes the ServerGroup name in pgAdmin. + // +kubebuilder:validation:Required Name string `json:"name"` + // PostgresClusterName selects one cluster to add to pgAdmin by name. 
+ // +kubebuilder:validation:Optional + PostgresClusterName string `json:"postgresClusterName,omitempty"` + // PostgresClusterSelector selects clusters to dynamically add to pgAdmin by matching labels. // An empty selector like `{}` will select ALL clusters in the namespace. - PostgresClusterSelector metav1.LabelSelector `json:"postgresClusterSelector"` + // +kubebuilder:validation:Optional + PostgresClusterSelector metav1.LabelSelector `json:"postgresClusterSelector,omitempty"` +} + +type PGAdminUser struct { + // A reference to the secret that holds the user's password. + // +kubebuilder:validation:Required + PasswordRef *corev1.SecretKeySelector `json:"passwordRef"` + + // Role determines whether the user has admin privileges or not. + // Defaults to User. Valid options are Administrator and User. + // +kubebuilder:validation:Enum={Administrator,User} + // +optional + Role string `json:"role,omitempty"` + + // The username for User in pgAdmin. + // Must be unique in the pgAdmin's users list. + // +kubebuilder:validation:Required + Username string `json:"username"` } // PGAdminStatus defines the observed state of PGAdmin type PGAdminStatus struct { - // conditions represent the observations of pgadmin's current state. - // Known .status.conditions.type are: "PersistentVolumeResizing", - // "Progressing", "ProxyAvailable" + // conditions represent the observations of pgAdmin's current state. + // Known .status.conditions.type is: "PersistentVolumeResizing" // +optional // +listType=map // +listMapKey=type // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} Conditions []metav1.Condition `json:"conditions,omitempty"` + // ImageSHA represents the image SHA for the container running pgAdmin. + // +optional + ImageSHA string `json:"imageSHA,omitempty"` + + // MajorVersion represents the major version of the running pgAdmin. + // +optional + MajorVersion int `json:"majorVersion,omitempty"` + // observedGeneration represents the .metadata.generation on which the status was based. // +optional // +kubebuilder:validation:Minimum=0 @@ -134,7 +184,7 @@ type PGAdminStatus struct { //+kubebuilder:object:root=true //+kubebuilder:subresource:status -// PGAdmin is the Schema for the pgadmins API +// PGAdmin is the Schema for the PGAdmin API type PGAdmin struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index 01dbd2c980..fa32069d0f 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -1,32 +1,38 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated -/* - Copyright 2021 - 2023 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 // Code generated by controller-gen. DO NOT EDIT. package v1beta1 import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResponses) DeepCopyInto(out *APIResponses) { + *out = *in + in.Cluster.DeepCopyInto(&out.Cluster) + in.Status.DeepCopyInto(&out.Status) + in.Upgrade.DeepCopyInto(&out.Upgrade) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResponses. +func (in *APIResponses) DeepCopy() *APIResponses { + if in == nil { + return nil + } + out := new(APIResponses) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupJobs) DeepCopyInto(out *BackupJobs) { *out = *in @@ -38,12 +44,12 @@ func (in *BackupJobs) DeepCopyInto(out *BackupJobs) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -69,6 +75,11 @@ func (in *BackupJobs) DeepCopy() *BackupJobs { func (in *Backups) DeepCopyInto(out *Backups) { *out = *in in.PGBackRest.DeepCopyInto(&out.PGBackRest) + if in.Snapshots != nil { + in, out := &in.Snapshots, &out.Snapshots + *out = new(VolumeSnapshots) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backups. @@ -81,6 +92,187 @@ func (in *Backups) DeepCopy() *Backups { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterUpgrade) DeepCopyInto(out *ClusterUpgrade) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]*UpgradeOperation, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(UpgradeOperation) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterUpgrade. +func (in *ClusterUpgrade) DeepCopy() *ClusterUpgrade { + if in == nil { + return nil + } + out := new(ClusterUpgrade) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrunchyBridgeCluster) DeepCopyInto(out *CrunchyBridgeCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeCluster. +func (in *CrunchyBridgeCluster) DeepCopy() *CrunchyBridgeCluster { + if in == nil { + return nil + } + out := new(CrunchyBridgeCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CrunchyBridgeCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrunchyBridgeClusterList) DeepCopyInto(out *CrunchyBridgeClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CrunchyBridgeCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeClusterList. +func (in *CrunchyBridgeClusterList) DeepCopy() *CrunchyBridgeClusterList { + if in == nil { + return nil + } + out := new(CrunchyBridgeClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CrunchyBridgeClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrunchyBridgeClusterRoleSpec) DeepCopyInto(out *CrunchyBridgeClusterRoleSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeClusterRoleSpec. +func (in *CrunchyBridgeClusterRoleSpec) DeepCopy() *CrunchyBridgeClusterRoleSpec { + if in == nil { + return nil + } + out := new(CrunchyBridgeClusterRoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrunchyBridgeClusterSpec) DeepCopyInto(out *CrunchyBridgeClusterSpec) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(Metadata) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*CrunchyBridgeClusterRoleSpec, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CrunchyBridgeClusterRoleSpec) + **out = **in + } + } + } + out.Storage = in.Storage.DeepCopy() +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeClusterSpec. +func (in *CrunchyBridgeClusterSpec) DeepCopy() *CrunchyBridgeClusterSpec { + if in == nil { + return nil + } + out := new(CrunchyBridgeClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrunchyBridgeClusterStatus) DeepCopyInto(out *CrunchyBridgeClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IsHA != nil { + in, out := &in.IsHA, &out.IsHA + *out = new(bool) + **out = **in + } + if in.IsProtected != nil { + in, out := &in.IsProtected, &out.IsProtected + *out = new(bool) + **out = **in + } + if in.OngoingUpgrade != nil { + in, out := &in.OngoingUpgrade, &out.OngoingUpgrade + *out = make([]*UpgradeOperation, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(UpgradeOperation) + **out = **in + } + } + } + in.Responses.DeepCopyInto(&out.Responses) + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + x := (*in).DeepCopy() + *out = &x + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeClusterStatus. +func (in *CrunchyBridgeClusterStatus) DeepCopy() *CrunchyBridgeClusterStatus { + if in == nil { + return nil + } + out := new(CrunchyBridgeClusterStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DataSource) DeepCopyInto(out *DataSource) { *out = *in @@ -176,14 +368,14 @@ func (in *ExporterSpec) DeepCopyInto(out *ExporterSpec) { *out = *in if in.Configuration != nil { in, out := &in.Configuration, &out.Configuration - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.CustomTLSSecret != nil { in, out := &in.CustomTLSSecret, &out.CustomTLSSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } in.Resources.DeepCopyInto(&out.Resources) @@ -315,14 +507,14 @@ func (in *PGAdminConfiguration) DeepCopyInto(out *PGAdminConfiguration) { *out = *in if in.Files != nil { in, out := &in.Files, &out.Files - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.LDAPBindPassword != nil { in, out := &in.LDAPBindPassword, &out.LDAPBindPassword - *out = new(v1.SecretKeySelector) + *out = new(corev1.SecretKeySelector) (*in).DeepCopyInto(*out) } in.Settings.DeepCopyInto(&out.Settings) @@ -380,7 +572,7 @@ func (in *PGAdminPodSpec) DeepCopyInto(out *PGAdminPodSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } in.Config.DeepCopyInto(&out.Config) @@ -403,14 +595,14 @@ func (in *PGAdminPodSpec) DeepCopyInto(out *PGAdminPodSpec) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.TopologySpreadConstraints != nil { in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -459,13 +651,13 @@ func (in *PGAdminSpec) DeepCopyInto(out *PGAdminSpec) { } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = 
make([]v1.LocalObjectReference, len(*in)) + *out = make([]corev1.LocalObjectReference, len(*in)) copy(*out, *in) } in.Resources.DeepCopyInto(&out.Resources) if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.PriorityClassName != nil { @@ -475,7 +667,7 @@ func (in *PGAdminSpec) DeepCopyInto(out *PGAdminSpec) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -487,6 +679,13 @@ func (in *PGAdminSpec) DeepCopyInto(out *PGAdminSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]PGAdminUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGAdminSpec. @@ -504,7 +703,7 @@ func (in *PGAdminStatus) DeepCopyInto(out *PGAdminStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -521,6 +720,26 @@ func (in *PGAdminStatus) DeepCopy() *PGAdminStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGAdminUser) DeepCopyInto(out *PGAdminUser) { + *out = *in + if in.PasswordRef != nil { + in, out := &in.PasswordRef, &out.PasswordRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGAdminUser. +func (in *PGAdminUser) DeepCopy() *PGAdminUser { + if in == nil { + return nil + } + out := new(PGAdminUser) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PGBackRestArchive) DeepCopyInto(out *PGBackRestArchive) { *out = *in @@ -531,7 +750,7 @@ func (in *PGBackRestArchive) DeepCopyInto(out *PGBackRestArchive) { } if in.Configuration != nil { in, out := &in.Configuration, &out.Configuration - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -622,7 +841,7 @@ func (in *PGBackRestDataSource) DeepCopyInto(out *PGBackRestDataSource) { *out = *in if in.Configuration != nil { in, out := &in.Configuration, &out.Configuration - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -643,7 +862,7 @@ func (in *PGBackRestDataSource) DeepCopyInto(out *PGBackRestDataSource) { in.Resources.DeepCopyInto(&out.Resources) if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.PriorityClassName != nil { @@ -653,7 +872,7 @@ func (in *PGBackRestDataSource) DeepCopyInto(out *PGBackRestDataSource) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -758,7 +977,7 @@ func (in *PGBackRestRepoHost) DeepCopyInto(out *PGBackRestRepoHost) { *out = *in if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.PriorityClassName != nil { @@ -769,26 +988,26 @@ func (in *PGBackRestRepoHost) DeepCopyInto(out *PGBackRestRepoHost) { in.Resources.DeepCopyInto(&out.Resources) if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.TopologySpreadConstraints != nil { in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.SSHConfiguration != nil { in, out := &in.SSHConfiguration, &out.SSHConfiguration - *out = new(v1.ConfigMapProjection) + *out = new(corev1.ConfigMapProjection) (*in).DeepCopyInto(*out) } if in.SSHSecret != nil { in, out := &in.SSHSecret, &out.SSHSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } } @@ -923,7 +1142,7 @@ func (in *PGBouncerConfiguration) DeepCopyInto(out *PGBouncerConfiguration) { *out = *in if in.Files != nil { in, out := &in.Files, &out.Files - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -971,20 +1190,20 @@ func (in *PGBouncerPodSpec) DeepCopyInto(out *PGBouncerPodSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } in.Config.DeepCopyInto(&out.Config) if in.Containers != nil { in, out := &in.Containers, &out.Containers - *out = make([]v1.Container, len(*in)) + *out = make([]corev1.Container, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.CustomTLSSecret != nil { in, out := &in.CustomTLSSecret, &out.CustomTLSSecret - *out = 
new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } if in.Port != nil { @@ -1020,14 +1239,14 @@ func (in *PGBouncerPodSpec) DeepCopyInto(out *PGBouncerPodSpec) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.TopologySpreadConstraints != nil { in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1173,13 +1392,13 @@ func (in *PGUpgradeSpec) DeepCopyInto(out *PGUpgradeSpec) { } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]v1.LocalObjectReference, len(*in)) + *out = make([]corev1.LocalObjectReference, len(*in)) copy(*out, *in) } in.Resources.DeepCopyInto(&out.Resources) if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.PriorityClassName != nil { @@ -1189,7 +1408,7 @@ func (in *PGUpgradeSpec) DeepCopyInto(out *PGUpgradeSpec) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1211,7 +1430,7 @@ func (in *PGUpgradeStatus) DeepCopyInto(out *PGUpgradeStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1314,7 +1533,7 @@ func (in *PostgresAdditionalConfig) DeepCopyInto(out *PostgresAdditionalConfig) *out = *in if in.Files != nil { in, out := &in.Files, &out.Files - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1369,7 +1588,7 @@ func (in *PostgresClusterDataSource) DeepCopyInto(out *PostgresClusterDataSource in.Resources.DeepCopyInto(&out.Resources) if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.PriorityClassName != nil { @@ -1379,7 +1598,7 @@ func (in *PostgresClusterDataSource) DeepCopyInto(out *PostgresClusterDataSource } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1444,12 +1663,12 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { in.Backups.DeepCopyInto(&out.Backups) if in.CustomTLSSecret != nil { in, out := &in.CustomTLSSecret, &out.CustomTLSSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } if in.CustomReplicationClientTLSSecret != nil { in, out := &in.CustomReplicationClientTLSSecret, &out.CustomReplicationClientTLSSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } if in.DatabaseInitSQL != nil { @@ -1464,7 +1683,7 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, 
&out.ImagePullSecrets - *out = make([]v1.LocalObjectReference, len(*in)) + *out = make([]corev1.LocalObjectReference, len(*in)) copy(*out, *in) } if in.InstanceSets != nil { @@ -1514,6 +1733,11 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { *out = new(ServiceSpec) (*in).DeepCopyInto(*out) } + if in.ReplicaService != nil { + in, out := &in.ReplicaService, &out.ReplicaService + *out = new(ServiceSpec) + (*in).DeepCopyInto(*out) + } if in.Shutdown != nil { in, out := &in.Shutdown, &out.Shutdown *out = new(bool) @@ -1555,7 +1779,9 @@ func (in *PostgresClusterStatus) DeepCopyInto(out *PostgresClusterStatus) { if in.InstanceSets != nil { in, out := &in.InstanceSets, &out.InstanceSets *out = make([]PostgresInstanceSetStatus, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } in.Patroni.DeepCopyInto(&out.Patroni) if in.PGBackRest != nil { @@ -1582,7 +1808,7 @@ func (in *PostgresClusterStatus) DeepCopyInto(out *PostgresClusterStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1609,12 +1835,12 @@ func (in *PostgresInstanceSetSpec) DeepCopyInto(out *PostgresInstanceSetSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.Containers != nil { in, out := &in.Containers, &out.Containers - *out = make([]v1.Container, len(*in)) + *out = make([]corev1.Container, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1643,21 +1869,21 @@ func (in *PostgresInstanceSetSpec) DeepCopyInto(out *PostgresInstanceSetSpec) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.TopologySpreadConstraints != nil { in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.WALVolumeClaimSpec != nil { in, out := &in.WALVolumeClaimSpec, &out.WALVolumeClaimSpec - *out = new(v1.PersistentVolumeClaimSpec) + *out = new(corev1.PersistentVolumeClaimSpec) (*in).DeepCopyInto(*out) } if in.TablespaceVolumes != nil { @@ -1682,6 +1908,13 @@ func (in *PostgresInstanceSetSpec) DeepCopy() *PostgresInstanceSetSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PostgresInstanceSetStatus) DeepCopyInto(out *PostgresInstanceSetStatus) { *out = *in + if in.DesiredPGDataVolume != nil { + in, out := &in.DesiredPGDataVolume, &out.DesiredPGDataVolume + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresInstanceSetStatus. 
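
Editor's note on the rename churn above: aside from the genuinely new types (CrunchyBridgeClusterStatus, PGAdminUser, UpgradeOperation, and friends), most of these hunks are mechanical. Core-group types move from the `v1.` prefix to `corev1.`, and `metav1.Condition` becomes `v1.Condition`, which is what you get when the generated file's import aliases are swapped during regeneration. Below is a minimal sketch of that presumed import block, with the generated copy pattern written out by hand; the alias paths are an assumption inferred from the renames, since the import section is not part of this diff:

```go
// Sketch only -- not part of the generated file. It illustrates the import
// aliasing the regenerated code above appears to assume: the core API group
// is imported as corev1, and apimachinery's meta package takes the bare v1
// alias, so metav1.Condition becomes v1.Condition while every core type
// gains the corev1. prefix.
package v1beta1

import (
	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// deepCopyStatusParts mirrors the generated pattern by hand: slices are
// re-allocated and copied element by element, and pointer fields are
// re-allocated before their targets are copied into them.
func deepCopyStatusParts(
	conditions []v1.Condition, tls *corev1.SecretProjection,
) ([]v1.Condition, *corev1.SecretProjection) {
	outConditions := make([]v1.Condition, len(conditions))
	for i := range conditions {
		conditions[i].DeepCopyInto(&outConditions[i])
	}
	var outTLS *corev1.SecretProjection
	if tls != nil {
		outTLS = new(corev1.SecretProjection)
		tls.DeepCopyInto(outTLS)
	}
	return outConditions, outTLS
}
```

The alias swap itself is behavior-neutral: the deep-copy semantics in these hunks are unchanged, and only the newly added fields introduce additional copying work.
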
@@ -1951,6 +2184,16 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(int32) **out = **in } + if in.InternalTrafficPolicy != nil { + in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy + *out = new(corev1.ServiceInternalTrafficPolicy) + **out = **in + } + if in.ExternalTrafficPolicy != nil { + in, out := &in.ExternalTrafficPolicy, &out.ExternalTrafficPolicy + *out = new(corev1.ServiceExternalTrafficPolicy) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. @@ -1968,7 +2211,7 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) { *out = *in if in.Resources != nil { in, out := &in.Resources, &out.Resources - *out = new(v1.ResourceRequirements) + *out = new(corev1.ResourceRequirements) (*in).DeepCopyInto(*out) } } @@ -1988,14 +2231,20 @@ func (in *StandalonePGAdminConfiguration) DeepCopyInto(out *StandalonePGAdminCon *out = *in if in.Files != nil { in, out := &in.Files, &out.Files - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ConfigDatabaseURI != nil { + in, out := &in.ConfigDatabaseURI, &out.ConfigDatabaseURI + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + in.Gunicorn.DeepCopyInto(&out.Gunicorn) if in.LDAPBindPassword != nil { in, out := &in.LDAPBindPassword, &out.LDAPBindPassword - *out = new(v1.SecretKeySelector) + *out = new(corev1.SecretKeySelector) (*in).DeepCopyInto(*out) } in.Settings.DeepCopyInto(&out.Settings) @@ -2027,6 +2276,21 @@ func (in *TablespaceVolume) DeepCopy() *TablespaceVolume { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradeOperation) DeepCopyInto(out *UpgradeOperation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeOperation. +func (in *UpgradeOperation) DeepCopy() *UpgradeOperation { + if in == nil { + return nil + } + out := new(UpgradeOperation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UserInterfaceSpec) DeepCopyInto(out *UserInterfaceSpec) { *out = *in @@ -2046,3 +2310,18 @@ func (in *UserInterfaceSpec) DeepCopy() *UserInterfaceSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshots) DeepCopyInto(out *VolumeSnapshots) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshots. 
+func (in *VolumeSnapshots) DeepCopy() *VolumeSnapshots { + if in == nil { + return nil + } + out := new(VolumeSnapshots) + in.DeepCopyInto(out) + return out +} diff --git a/testing/kuttl/e2e-other/autogrow-volume/00-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/00-assert.yaml new file mode 100644 index 0000000000..b4372b75e7 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/00-assert.yaml @@ -0,0 +1,7 @@ +# Ensure that the default StorageClass supports VolumeExpansion +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" +allowVolumeExpansion: true diff --git a/testing/kuttl/e2e-other/autogrow-volume/01-create.yaml b/testing/kuttl/e2e-other/autogrow-volume/01-create.yaml new file mode 100644 index 0000000000..fc947a538f --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/01-create.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/01-create-cluster.yaml +assert: +- files/01-cluster-and-pvc-created.yaml diff --git a/testing/kuttl/e2e-other/autogrow-volume/02-add-data.yaml b/testing/kuttl/e2e-other/autogrow-volume/02-add-data.yaml new file mode 100644 index 0000000000..261c274a51 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/02-add-data.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/02-create-data.yaml +assert: +- files/02-create-data-completed.yaml diff --git a/testing/kuttl/e2e-other/autogrow-volume/03-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/03-assert.yaml new file mode 100644 index 0000000000..ad31b61401 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/03-assert.yaml @@ -0,0 +1,12 @@ +--- +# Check that annotation is set +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: auto-grow-volume + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/patroni: auto-grow-volume-ha + annotations: + suggested-pgdata-pvc-size: 1461Mi diff --git a/testing/kuttl/e2e-other/autogrow-volume/04-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/04-assert.yaml new file mode 100644 index 0000000000..d486f9de18 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/04-assert.yaml @@ -0,0 +1,19 @@ +# We know that the PVC sizes have changed so now we can check that they have been +# updated to have the expected size +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: auto-grow-volume + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 1461Mi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound diff --git a/testing/kuttl/e2e-other/autogrow-volume/05-check-event.yaml b/testing/kuttl/e2e-other/autogrow-volume/05-check-event.yaml new file mode 100644 index 0000000000..475177d242 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/05-check-event.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + # Verify expected event has occurred + - script: | + EVENT=$( + kubectl get events --namespace="${NAMESPACE}" \ + --field-selector reason="VolumeAutoGrow" --output=jsonpath={.items..message} + ) + + if [[ "${EVENT}" != "pgData volume expansion to 1461Mi requested for auto-grow-volume/instance1." 
]]; then exit 1; fi diff --git a/testing/kuttl/e2e-other/autogrow-volume/README.md b/testing/kuttl/e2e-other/autogrow-volume/README.md new file mode 100644 index 0000000000..674bc69b40 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/README.md @@ -0,0 +1,9 @@ +### AutoGrow Volume + +* 00: Assert the storage class allows volume expansion +* 01: Create and verify PostgresCluster and PVC +* 02: Add data to trigger growth and verify Job completes +* 03: Verify annotation on the instance Pod +* 04: Verify the PVC request has been set and the PVC has grown +* 05: Verify the expansion request Event has been created + Note: This Event should be created between steps 03 and 04 but is checked at the end for timing purposes. diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/01-cluster-and-pvc-created.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/01-cluster-and-pvc-created.yaml new file mode 100644 index 0000000000..17804b8205 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/files/01-cluster-and-pvc-created.yaml @@ -0,0 +1,27 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: auto-grow-volume +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: auto-grow-volume + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 1Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/01-create-cluster.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/01-create-cluster.yaml new file mode 100644 index 0000000000..01eaf7a684 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/files/01-create-cluster.yaml @@ -0,0 +1,27 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: auto-grow-volume +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + limits: + storage: 2Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data-completed.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data-completed.yaml new file mode 100644 index 0000000000..fdb42e68f5 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data-completed.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: create-data +status: + succeeded: 1 diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data.yaml new file mode 100644 index 0000000000..c42f0dec10 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data.yaml @@ -0,0 +1,32 @@ +--- +# Create some data that should be present after resizing. 
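+# Editor's note (assumption, not from the original patch): each generated row
+# below is only a few dozen bytes, so six million rows amounts to a few
+# hundred megabytes -- enough to push the 1Gi pgdata volume past the 75%
+# usage mark mentioned in the comment below, which is what produces the
+# 1461Mi suggested-pgdata-pvc-size annotation asserted in steps 03 through 05.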
+apiVersion: batch/v1 +kind: Job +metadata: + name: create-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: auto-grow-volume-pguser-auto-grow-volume, key: uri } } + + # Do not wait indefinitely, but leave enough time to create the data. + - { name: PGCONNECT_TIMEOUT, value: '60' } + + command: + - psql + - $(PGURI) + - --set=ON_ERROR_STOP=1 + - --command + - | # create schema for user and add enough data to get over 75% usage + CREATE SCHEMA "auto-grow-volume" AUTHORIZATION "auto-grow-volume"; + CREATE TABLE big_table AS SELECT 'data' || s AS mydata FROM generate_series(1,6000000) AS s; diff --git a/testing/kuttl/e2e-other/cluster-migrate/README.md b/testing/kuttl/e2e-other/cluster-migrate/README.md index b2becc9ffb..09026f9e8b 100644 --- a/testing/kuttl/e2e-other/cluster-migrate/README.md +++ b/testing/kuttl/e2e-other/cluster-migrate/README.md @@ -24,7 +24,7 @@ WARNING: database \"postgres\" has a collation version mismatch DETAIL: The database was created using collation version 2.31, but the operating system provides version 2.28 ``` -This error occured in `reconcilePostgresDatabases` and prevented PGO from finishing the reconcile +This error occurred in `reconcilePostgresDatabases` and prevented PGO from finishing the reconcile loop. For _testing purposes_, this problem is worked around in steps 06 and 07, which wait for the PG pod to be ready and then send a command to `REFRESH COLLATION VERSION` on the `postgres` and `template1` databases (which were the only databases where this error was observed during @@ -39,7 +39,7 @@ as an automatic step. 
User intervention and supervision is recommended in that case. * 02: Create data on that cluster * 03: Alter the Reclaim policy of the PV so that it will survive deletion of the cluster * 04: Delete the original cluster, leaving the PV -* 05: Create a PGO-managed `postgrescluster` with the remaing PV as the datasource +* 05: Create a PGO-managed `postgrescluster` with the remaining PV as the datasource * 06-07: Wait for the PG pod to be ready and alter the collation (PG 15 only, see above) * 08: Alter the PV to the original Reclaim policy * 09: Check that the data successfully migrated diff --git a/testing/kuttl/e2e-other/replica-service/00-base-cluster.yaml b/testing/kuttl/e2e-other/replica-service/00-base-cluster.yaml new file mode 100644 index 0000000000..725f40de14 --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/00-base-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/base-cluster.yaml +assert: +- files/base-check.yaml diff --git a/testing/kuttl/e2e-other/replica-service/01-node-port.yaml b/testing/kuttl/e2e-other/replica-service/01-node-port.yaml new file mode 100644 index 0000000000..c80e947e40 --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/01-node-port.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/np-cluster.yaml +assert: +- files/np-check.yaml diff --git a/testing/kuttl/e2e-other/replica-service/02-loadbalancer.yaml b/testing/kuttl/e2e-other/replica-service/02-loadbalancer.yaml new file mode 100644 index 0000000000..f1433111db --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/02-loadbalancer.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/lb-cluster.yaml +assert: +- files/lb-check.yaml diff --git a/testing/kuttl/e2e-other/replica-service/03-cluster-ip.yaml b/testing/kuttl/e2e-other/replica-service/03-cluster-ip.yaml new file mode 100644 index 0000000000..de6055ea6b --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/03-cluster-ip.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/cip-cluster.yaml +assert: +- files/cip-check.yaml diff --git a/testing/kuttl/e2e-other/replica-service/files/base-check.yaml b/testing/kuttl/e2e-other/replica-service/files/base-check.yaml new file mode 100644 index 0000000000..a83fce0f57 --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/base-check.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: service +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +apiVersion: v1 +kind: Service +metadata: + name: service-replicas diff --git a/testing/kuttl/e2e-other/replica-service/files/base-cluster.yaml b/testing/kuttl/e2e-other/replica-service/files/base-cluster.yaml new file mode 100644 index 0000000000..67c4481d2f --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/base-cluster.yaml @@ -0,0 +1,28 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: service +spec: + postgresVersion: ${KUTTL_PG_VERSION} + replicaService: + type: ClusterIP + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 0.5Gi + replicas: 2 + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 0.5Gi diff --git 
a/testing/kuttl/e2e-other/replica-service/files/cip-check.yaml b/testing/kuttl/e2e-other/replica-service/files/cip-check.yaml new file mode 100644 index 0000000000..5bf5422bb8 --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/cip-check.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: service-replicas +spec: + type: ClusterIP + selector: + postgres-operator.crunchydata.com/cluster: service + postgres-operator.crunchydata.com/role: replica diff --git a/testing/kuttl/e2e-other/replica-service/files/cip-cluster.yaml b/testing/kuttl/e2e-other/replica-service/files/cip-cluster.yaml new file mode 100644 index 0000000000..8545aa8223 --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/cip-cluster.yaml @@ -0,0 +1,8 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: service +spec: + replicaService: + type: ClusterIP + nodePort: null diff --git a/testing/kuttl/e2e-other/replica-service/files/lb-check.yaml b/testing/kuttl/e2e-other/replica-service/files/lb-check.yaml new file mode 100644 index 0000000000..b8519491c7 --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/lb-check.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: service-replicas +spec: + type: LoadBalancer + selector: + postgres-operator.crunchydata.com/cluster: service + postgres-operator.crunchydata.com/role: replica diff --git a/testing/kuttl/e2e-other/replica-service/files/lb-cluster.yaml b/testing/kuttl/e2e-other/replica-service/files/lb-cluster.yaml new file mode 100644 index 0000000000..5e18f71dcd --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/lb-cluster.yaml @@ -0,0 +1,8 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: service +spec: + replicaService: + type: LoadBalancer + nodePort: null diff --git a/testing/kuttl/e2e-other/replica-service/files/np-check.yaml b/testing/kuttl/e2e-other/replica-service/files/np-check.yaml new file mode 100644 index 0000000000..c7d791e36a --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/np-check.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: service-replicas +spec: + type: NodePort + ports: + - name: postgres + port: 5432 + protocol: TCP + targetPort: postgres + selector: + postgres-operator.crunchydata.com/cluster: service + postgres-operator.crunchydata.com/role: replica diff --git a/testing/kuttl/e2e-other/replica-service/files/np-cluster.yaml b/testing/kuttl/e2e-other/replica-service/files/np-cluster.yaml new file mode 100644 index 0000000000..0b20ae63ad --- /dev/null +++ b/testing/kuttl/e2e-other/replica-service/files/np-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: service +spec: + replicaService: + type: NodePort diff --git a/testing/kuttl/e2e/cluster-pause/00--cluster.yaml b/testing/kuttl/e2e/cluster-pause/00--cluster.yaml index abf7b9f4f2..801a22d460 100644 --- a/testing/kuttl/e2e/cluster-pause/00--cluster.yaml +++ b/testing/kuttl/e2e/cluster-pause/00--cluster.yaml @@ -1,25 +1,6 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: cluster-pause -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - 
"ReadWriteOnce" - resources: - requests: - storage: 1Gi +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-create-cluster.yaml +assert: +- files/00-cluster-created.yaml diff --git a/testing/kuttl/e2e/cluster-pause/00-assert.yaml b/testing/kuttl/e2e/cluster-pause/00-assert.yaml index 5c867a7892..a51dd3ab4a 100644 --- a/testing/kuttl/e2e/cluster-pause/00-assert.yaml +++ b/testing/kuttl/e2e/cluster-pause/00-assert.yaml @@ -1,23 +1,7 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: cluster-pause -status: - conditions: - - message: pgBackRest dedicated repository host is ready - reason: RepoHostReady - status: "True" - type: PGBackRestRepoHostReady - - message: pgBackRest replica create repo is ready for backups - reason: StanzaCreated - status: "True" - type: PGBackRestReplicaRepoReady - - message: pgBackRest replica creation is now possible - reason: RepoBackupComplete - status: "True" - type: PGBackRestReplicaCreate - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-pause +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-pause diff --git a/testing/kuttl/e2e/cluster-pause/01--cluster-paused.yaml b/testing/kuttl/e2e/cluster-pause/01--cluster-paused.yaml index a66fe9529e..deab5e0228 100644 --- a/testing/kuttl/e2e/cluster-pause/01--cluster-paused.yaml +++ b/testing/kuttl/e2e/cluster-pause/01--cluster-paused.yaml @@ -1,16 +1,6 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: cluster-pause -spec: - paused: true - instances: - - name: instance1 - # We set replicas to 2, but this won't result in a new replica until we resume - replicas: 2 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/01-pause-cluster.yaml +assert: +- files/01-cluster-paused.yaml diff --git a/testing/kuttl/e2e/cluster-pause/01-assert.yaml b/testing/kuttl/e2e/cluster-pause/01-assert.yaml index 8a10c9dd12..a51dd3ab4a 100644 --- a/testing/kuttl/e2e/cluster-pause/01-assert.yaml +++ b/testing/kuttl/e2e/cluster-pause/01-assert.yaml @@ -1,27 +1,7 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: cluster-pause -status: - conditions: - - message: pgBackRest dedicated repository host is ready - reason: RepoHostReady - status: "True" - type: PGBackRestRepoHostReady - - message: pgBackRest replica create repo is ready for backups - reason: StanzaCreated - status: "True" - type: PGBackRestReplicaRepoReady - - message: pgBackRest replica creation is now possible - reason: RepoBackupComplete - status: "True" - type: PGBackRestReplicaCreate - - message: No spec changes will be applied and no other statuses will be updated. 
- reason: Paused - status: "False" - type: Progressing - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-pause +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-pause diff --git a/testing/kuttl/e2e/cluster-pause/02--cluster-resume.yaml b/testing/kuttl/e2e/cluster-pause/02--cluster-resume.yaml index 2f5665e146..bb1def96c5 100644 --- a/testing/kuttl/e2e/cluster-pause/02--cluster-resume.yaml +++ b/testing/kuttl/e2e/cluster-pause/02--cluster-resume.yaml @@ -1,6 +1,6 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: cluster-pause -spec: - paused: false +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/02-resume-cluster.yaml +assert: +- files/02-cluster-resumed.yaml diff --git a/testing/kuttl/e2e/cluster-pause/02-assert.yaml b/testing/kuttl/e2e/cluster-pause/02-assert.yaml index 18ead97434..a51dd3ab4a 100644 --- a/testing/kuttl/e2e/cluster-pause/02-assert.yaml +++ b/testing/kuttl/e2e/cluster-pause/02-assert.yaml @@ -1,23 +1,7 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: cluster-pause -status: - conditions: - - message: pgBackRest dedicated repository host is ready - reason: RepoHostReady - status: "True" - type: PGBackRestRepoHostReady - - message: pgBackRest replica create repo is ready for backups - reason: StanzaCreated - status: "True" - type: PGBackRestReplicaRepoReady - - message: pgBackRest replica creation is now possible - reason: RepoBackupComplete - status: "True" - type: PGBackRestReplicaCreate - instances: - - name: instance1 - readyReplicas: 2 - replicas: 2 - updatedReplicas: 2 +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-pause +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-pause diff --git a/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml b/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml new file mode 100644 index 0000000000..a5fe982b1a --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml @@ -0,0 +1,10 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml b/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml new file mode 100644 index 0000000000..9f687a1dfa --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml @@ -0,0 +1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml b/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml new file mode 100644 index 0000000000..6776fc542b --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml @@ -0,0 +1,22 @@ +apiVersion: 
postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +status: + conditions: + - message: No spec changes will be applied and no other statuses will be updated. + reason: Paused + status: "False" + type: Progressing + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: cluster-pause-ha +spec: + type: ClusterIP diff --git a/testing/kuttl/e2e/cluster-pause/files/01-pause-cluster.yaml b/testing/kuttl/e2e/cluster-pause/files/01-pause-cluster.yaml new file mode 100644 index 0000000000..6a21b00b22 --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/files/01-pause-cluster.yaml @@ -0,0 +1,17 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +spec: + # We change the service, but this won't result in a change until we resume + service: + type: LoadBalancer + paused: true + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml b/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml new file mode 100644 index 0000000000..82062fb908 --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml @@ -0,0 +1,17 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: cluster-pause-ha +spec: + type: LoadBalancer diff --git a/testing/kuttl/e2e/cluster-pause/files/02-resume-cluster.yaml b/testing/kuttl/e2e/cluster-pause/files/02-resume-cluster.yaml new file mode 100644 index 0000000000..2f5665e146 --- /dev/null +++ b/testing/kuttl/e2e/cluster-pause/files/02-resume-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-pause +spec: + paused: false diff --git a/testing/kuttl/e2e/cluster-start/00--cluster.yaml b/testing/kuttl/e2e/cluster-start/00--cluster.yaml index a870d940f1..801a22d460 100644 --- a/testing/kuttl/e2e/cluster-start/00--cluster.yaml +++ b/testing/kuttl/e2e/cluster-start/00--cluster.yaml @@ -1,25 +1,6 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: cluster-start -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-create-cluster.yaml +assert: +- files/00-cluster-created.yaml diff --git a/testing/kuttl/e2e/cluster-start/00-assert.yaml b/testing/kuttl/e2e/cluster-start/00-assert.yaml index ecc6ab7fe8..b513f5ffda 100644 --- a/testing/kuttl/e2e/cluster-start/00-assert.yaml +++ b/testing/kuttl/e2e/cluster-start/00-assert.yaml @@ -1,24 +1,7 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: cluster-start -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - postgres-operator.crunchydata.com/cluster: cluster-start - 
postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 ---- -apiVersion: v1 -kind: Service -metadata: - name: cluster-start-primary +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-start +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-start diff --git a/testing/kuttl/e2e/cluster-start/01--connect.yaml b/testing/kuttl/e2e/cluster-start/01--connect.yaml new file mode 100644 index 0000000000..9586a772ad --- /dev/null +++ b/testing/kuttl/e2e/cluster-start/01--connect.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/01-connect-psql.yaml +assert: +- files/01-psql-connected.yaml diff --git a/testing/kuttl/e2e/cluster-start/01-assert.yaml b/testing/kuttl/e2e/cluster-start/01-assert.yaml index e4d8bbb37a..b513f5ffda 100644 --- a/testing/kuttl/e2e/cluster-start/01-assert.yaml +++ b/testing/kuttl/e2e/cluster-start/01-assert.yaml @@ -1,6 +1,7 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: psql-connect -status: - succeeded: 1 +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=cluster-start +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=cluster-start diff --git a/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml b/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml new file mode 100644 index 0000000000..4eebece89e --- /dev/null +++ b/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-start +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: cluster-start-primary diff --git a/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml b/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml new file mode 100644 index 0000000000..713cd14eb3 --- /dev/null +++ b/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml @@ -0,0 +1,14 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: cluster-start +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/cluster-start/01--psql-connect.yaml b/testing/kuttl/e2e/cluster-start/files/01-connect-psql.yaml similarity index 100% rename from testing/kuttl/e2e/cluster-start/01--psql-connect.yaml rename to testing/kuttl/e2e/cluster-start/files/01-connect-psql.yaml diff --git a/testing/kuttl/e2e/cluster-start/files/01-psql-connected.yaml b/testing/kuttl/e2e/cluster-start/files/01-psql-connected.yaml new file mode 100644 index 0000000000..e4d8bbb37a --- /dev/null +++ b/testing/kuttl/e2e/cluster-start/files/01-psql-connected.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-connect +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/delete-namespace/00-assert.yaml b/testing/kuttl/e2e/delete-namespace/00-assert.yaml new file mode 100644 index 0000000000..78aea811c3 --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/00-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: 
kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n ${KUTTL_TEST_DELETE_NAMESPACE} describe pods --selector postgres-operator.crunchydata.com/cluster=delete-namespace +- namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + selector: postgres-operator.crunchydata.com/cluster=delete-namespace diff --git a/testing/kuttl/e2e/delete-namespace/00-create-cluster.yaml b/testing/kuttl/e2e/delete-namespace/00-create-cluster.yaml new file mode 100644 index 0000000000..2245df00c8 --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/00-create-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-create-namespace.yaml +- files/00-create-cluster.yaml +assert: +- files/00-created.yaml diff --git a/testing/kuttl/e2e/delete-namespace/01-assert.yaml b/testing/kuttl/e2e/delete-namespace/01-assert.yaml index 3d2c7ec936..78aea811c3 100644 --- a/testing/kuttl/e2e/delete-namespace/01-assert.yaml +++ b/testing/kuttl/e2e/delete-namespace/01-assert.yaml @@ -1,22 +1,7 @@ ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: delete-namespace - namespace: ${KUTTL_TEST_DELETE_NAMESPACE} -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: batch/v1 -kind: Job -metadata: - namespace: ${KUTTL_TEST_DELETE_NAMESPACE} - labels: - postgres-operator.crunchydata.com/cluster: delete-namespace - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n ${KUTTL_TEST_DELETE_NAMESPACE} describe pods --selector postgres-operator.crunchydata.com/cluster=delete-namespace +- namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + selector: postgres-operator.crunchydata.com/cluster=delete-namespace diff --git a/testing/kuttl/e2e/delete-namespace/02--delete-namespace.yaml b/testing/kuttl/e2e/delete-namespace/01-delete-namespace.yaml similarity index 84% rename from testing/kuttl/e2e/delete-namespace/02--delete-namespace.yaml rename to testing/kuttl/e2e/delete-namespace/01-delete-namespace.yaml index 8987d233f1..8fed721e5e 100644 --- a/testing/kuttl/e2e/delete-namespace/02--delete-namespace.yaml +++ b/testing/kuttl/e2e/delete-namespace/01-delete-namespace.yaml @@ -6,3 +6,5 @@ delete: - apiVersion: v1 kind: Namespace name: ${KUTTL_TEST_DELETE_NAMESPACE} +error: +- files/01-errors.yaml diff --git a/testing/kuttl/e2e/delete-namespace/01--cluster.yaml b/testing/kuttl/e2e/delete-namespace/files/00-create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/delete-namespace/01--cluster.yaml rename to testing/kuttl/e2e/delete-namespace/files/00-create-cluster.yaml diff --git a/testing/kuttl/e2e/delete-namespace/00--namespace.yaml b/testing/kuttl/e2e/delete-namespace/files/00-create-namespace.yaml similarity index 100% rename from testing/kuttl/e2e/delete-namespace/00--namespace.yaml rename to testing/kuttl/e2e/delete-namespace/files/00-create-namespace.yaml diff --git a/testing/kuttl/e2e/delete-namespace/files/00-created.yaml b/testing/kuttl/e2e/delete-namespace/files/00-created.yaml new file mode 100644 index 0000000000..3d2c7ec936 --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/files/00-created.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} +status: + instances: + - name: instance1 + 
readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/delete-namespace/02-errors.yaml b/testing/kuttl/e2e/delete-namespace/files/01-errors.yaml similarity index 100% rename from testing/kuttl/e2e/delete-namespace/02-errors.yaml rename to testing/kuttl/e2e/delete-namespace/files/01-errors.yaml diff --git a/testing/kuttl/e2e/delete/00-assert.yaml b/testing/kuttl/e2e/delete/00-assert.yaml index 6130475c07..e4d88b3031 100644 --- a/testing/kuttl/e2e/delete/00-assert.yaml +++ b/testing/kuttl/e2e/delete/00-assert.yaml @@ -1,20 +1,7 @@ ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: delete -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=delete +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=delete diff --git a/testing/kuttl/e2e/delete/00-create-cluster.yaml b/testing/kuttl/e2e/delete/00-create-cluster.yaml new file mode 100644 index 0000000000..801a22d460 --- /dev/null +++ b/testing/kuttl/e2e/delete/00-create-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-create-cluster.yaml +assert: +- files/00-cluster-created.yaml diff --git a/testing/kuttl/e2e/delete/01--delete-cluster.yaml b/testing/kuttl/e2e/delete/01-delete-cluster.yaml similarity index 79% rename from testing/kuttl/e2e/delete/01--delete-cluster.yaml rename to testing/kuttl/e2e/delete/01-delete-cluster.yaml index ccb36f0166..a1f26b39c4 100644 --- a/testing/kuttl/e2e/delete/01--delete-cluster.yaml +++ b/testing/kuttl/e2e/delete/01-delete-cluster.yaml @@ -1,8 +1,8 @@ ---- -# Remove the cluster. apiVersion: kuttl.dev/v1beta1 kind: TestStep delete: - apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster name: delete +error: +- files/01-cluster-deleted.yaml diff --git a/testing/kuttl/e2e/delete/10-assert.yaml b/testing/kuttl/e2e/delete/10-assert.yaml index 1940fc680a..a2c226cc7a 100644 --- a/testing/kuttl/e2e/delete/10-assert.yaml +++ b/testing/kuttl/e2e/delete/10-assert.yaml @@ -1,36 +1,7 @@ ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: delete-with-replica -status: - instances: - - name: instance1 - readyReplicas: 2 - replicas: 2 - updatedReplicas: 2 ---- -# Patroni labels and readiness happen separately. -# The next step expects to find pods by their role label; wait for them here. 
-apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-with-replica - postgres-operator.crunchydata.com/role: master ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-with-replica - postgres-operator.crunchydata.com/role: replica ---- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-with-replica - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=delete-with-replica +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=delete-with-replica diff --git a/testing/kuttl/e2e/delete/10-create-cluster-with-replicas.yaml b/testing/kuttl/e2e/delete/10-create-cluster-with-replicas.yaml new file mode 100644 index 0000000000..678a09c710 --- /dev/null +++ b/testing/kuttl/e2e/delete/10-create-cluster-with-replicas.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/10-create-cluster-with-replicas.yaml +assert: +- files/10-cluster-with-replicas-created.yaml diff --git a/testing/kuttl/e2e/delete/11-delete-cluster.yaml b/testing/kuttl/e2e/delete/11-delete-cluster-with-replicas.yaml similarity index 78% rename from testing/kuttl/e2e/delete/11-delete-cluster.yaml rename to testing/kuttl/e2e/delete/11-delete-cluster-with-replicas.yaml index 991d8d1c44..b2f04ea7ed 100644 --- a/testing/kuttl/e2e/delete/11-delete-cluster.yaml +++ b/testing/kuttl/e2e/delete/11-delete-cluster-with-replicas.yaml @@ -6,3 +6,5 @@ delete: - apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster name: delete-with-replica +error: +- files/11-cluster-with-replicas-deleted.yaml diff --git a/testing/kuttl/e2e/delete/20-assert.yaml b/testing/kuttl/e2e/delete/20-assert.yaml new file mode 100644 index 0000000000..d85d96101f --- /dev/null +++ b/testing/kuttl/e2e/delete/20-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=delete-not-running +# This shouldn't be running, so skip logs; if there's an error, we'll be able to see it in the describe diff --git a/testing/kuttl/e2e/delete/20-create-broken-cluster.yaml b/testing/kuttl/e2e/delete/20-create-broken-cluster.yaml new file mode 100644 index 0000000000..9db684036e --- /dev/null +++ b/testing/kuttl/e2e/delete/20-create-broken-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/20-create-broken-cluster.yaml +error: +- files/20-broken-cluster-not-created.yaml diff --git a/testing/kuttl/e2e/delete/21--delete-cluster.yaml b/testing/kuttl/e2e/delete/21-delete-broken-cluster.yaml similarity index 80% rename from testing/kuttl/e2e/delete/21--delete-cluster.yaml rename to testing/kuttl/e2e/delete/21-delete-broken-cluster.yaml index b585401167..3e159f17d4 100644 --- a/testing/kuttl/e2e/delete/21--delete-cluster.yaml +++ b/testing/kuttl/e2e/delete/21-delete-broken-cluster.yaml @@ -6,3 +6,5 @@ delete: - apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster name: delete-not-running +error: +- files/21-broken-cluster-deleted.yaml diff --git a/testing/kuttl/e2e/delete/README.md b/testing/kuttl/e2e/delete/README.md index 3a7d4fd848..7e99680162 
100644 --- a/testing/kuttl/e2e/delete/README.md +++ b/testing/kuttl/e2e/delete/README.md @@ -1,18 +1,18 @@ ### Delete test -#### Regular cluster delete +#### Regular cluster delete (00-01) * Start a regular cluster * Delete it * Check that nothing remains. -#### Delete cluster with replica +#### Delete cluster with replica (10-11) * Start a regular cluster with 2 replicas * Delete it * Check that nothing remains -#### Delete a cluster that never started +#### Delete a cluster that never started (20-21) * Start a cluster with a bad image * Delete it diff --git a/testing/kuttl/e2e/delete/files/00-cluster-created.yaml b/testing/kuttl/e2e/delete/files/00-cluster-created.yaml new file mode 100644 index 0000000000..6130475c07 --- /dev/null +++ b/testing/kuttl/e2e/delete/files/00-cluster-created.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/delete/00--cluster.yaml b/testing/kuttl/e2e/delete/files/00-create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/delete/00--cluster.yaml rename to testing/kuttl/e2e/delete/files/00-create-cluster.yaml diff --git a/testing/kuttl/e2e/delete/02-errors.yaml b/testing/kuttl/e2e/delete/files/01-cluster-deleted.yaml similarity index 100% rename from testing/kuttl/e2e/delete/02-errors.yaml rename to testing/kuttl/e2e/delete/files/01-cluster-deleted.yaml diff --git a/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml b/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml new file mode 100644 index 0000000000..1940fc680a --- /dev/null +++ b/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-with-replica +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +# Patroni labels and readiness happen separately. +# The next step expects to find pods by their role label; wait for them here. 
+apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica + postgres-operator.crunchydata.com/role: master +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica + postgres-operator.crunchydata.com/role: replica +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/delete/10--cluster.yaml b/testing/kuttl/e2e/delete/files/10-create-cluster-with-replicas.yaml similarity index 100% rename from testing/kuttl/e2e/delete/10--cluster.yaml rename to testing/kuttl/e2e/delete/files/10-create-cluster-with-replicas.yaml diff --git a/testing/kuttl/e2e/delete/12-errors.yaml b/testing/kuttl/e2e/delete/files/11-cluster-with-replicas-deleted.yaml similarity index 100% rename from testing/kuttl/e2e/delete/12-errors.yaml rename to testing/kuttl/e2e/delete/files/11-cluster-with-replicas-deleted.yaml diff --git a/testing/kuttl/e2e/delete/20-errors.yaml b/testing/kuttl/e2e/delete/files/20-broken-cluster-not-created.yaml similarity index 100% rename from testing/kuttl/e2e/delete/20-errors.yaml rename to testing/kuttl/e2e/delete/files/20-broken-cluster-not-created.yaml diff --git a/testing/kuttl/e2e/delete/20--cluster.yaml b/testing/kuttl/e2e/delete/files/20-create-broken-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/delete/20--cluster.yaml rename to testing/kuttl/e2e/delete/files/20-create-broken-cluster.yaml diff --git a/testing/kuttl/e2e/delete/22-errors.yaml b/testing/kuttl/e2e/delete/files/21-broken-cluster-deleted.yaml similarity index 100% rename from testing/kuttl/e2e/delete/22-errors.yaml rename to testing/kuttl/e2e/delete/files/21-broken-cluster-deleted.yaml diff --git a/testing/kuttl/e2e/empty-image-upgrade/01--valid-upgrade.yaml b/testing/kuttl/e2e/empty-image-upgrade/01--valid-upgrade.yaml deleted file mode 100644 index ff3a5f356e..0000000000 --- a/testing/kuttl/e2e/empty-image-upgrade/01--valid-upgrade.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# This upgrade is valid, but has no pgcluster to work on and should get that condition -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: empty-image-upgrade -spec: - # postgres version that is no longer available - fromPostgresVersion: 10 - toPostgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} - postgresClusterName: missing-primary-status diff --git a/testing/kuttl/e2e/empty-image-upgrade/01-assert.yaml b/testing/kuttl/e2e/empty-image-upgrade/01-assert.yaml deleted file mode 100644 index b7d0f936fb..0000000000 --- a/testing/kuttl/e2e/empty-image-upgrade/01-assert.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: empty-image-upgrade -status: - conditions: - - type: "Progressing" - status: "False" - reason: "PGClusterNotFound" diff --git a/testing/kuttl/e2e/empty-image-upgrade/10--cluster.yaml b/testing/kuttl/e2e/empty-image-upgrade/10--cluster.yaml deleted file mode 100644 index f205e2bcd2..0000000000 --- a/testing/kuttl/e2e/empty-image-upgrade/10--cluster.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Create the cluster we will do an actual upgrade on, but set the postgres version -# to '10' to force a missing image scenario -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: 
missing-primary-status -spec: - # postgres version that is no longer available - postgresVersion: 10 - patroni: - dynamicConfiguration: - postgresql: - parameters: - shared_preload_libraries: pgaudit, set_user, pg_stat_statements, pgnodemx, pg_cron - instances: - - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/empty-image-upgrade/10-assert.yaml b/testing/kuttl/e2e/empty-image-upgrade/10-assert.yaml deleted file mode 100644 index 72e9ff6387..0000000000 --- a/testing/kuttl/e2e/empty-image-upgrade/10-assert.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# The cluster is not running due to the missing image, not due to a proper -# shutdown status. -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: empty-image-upgrade -status: - conditions: - - type: "Progressing" - status: "False" - reason: "PGClusterNotShutdown" diff --git a/testing/kuttl/e2e/empty-image-upgrade/11--shutdown-cluster.yaml b/testing/kuttl/e2e/empty-image-upgrade/11--shutdown-cluster.yaml deleted file mode 100644 index 6d784b682b..0000000000 --- a/testing/kuttl/e2e/empty-image-upgrade/11--shutdown-cluster.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -# Shutdown the cluster -- but without the annotation. -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: missing-primary-status -spec: - shutdown: true diff --git a/testing/kuttl/e2e/empty-image-upgrade/11-assert.yaml b/testing/kuttl/e2e/empty-image-upgrade/11-assert.yaml deleted file mode 100644 index 5bd9d447cb..0000000000 --- a/testing/kuttl/e2e/empty-image-upgrade/11-assert.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# Since the cluster is missing the annotation, we get this condition -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: empty-image-upgrade -status: - conditions: - - type: "Progressing" - status: "False" - reason: "PGClusterPrimaryNotIdentified" diff --git a/testing/kuttl/e2e/empty-image-upgrade/README.md b/testing/kuttl/e2e/empty-image-upgrade/README.md deleted file mode 100644 index 5547515d13..0000000000 --- a/testing/kuttl/e2e/empty-image-upgrade/README.md +++ /dev/null @@ -1,17 +0,0 @@ -## Empty image upgrade status tests - -This is a variation derived from our major upgrade KUTTL tests designed to -test a scenario where a required container images is not defined in either the -PostgresCluster spec or via the RELATED_IMAGES environment variables. - -### Basic PGUpgrade controller and CRD instance validation - -* 01--valid-upgrade: create a valid PGUpgrade instance -* 01-assert: check that the PGUpgrade instance exists and has the expected status - -### Verify new statuses for missing required container images - -* 10--cluster: create the cluster with an unavailable image (i.e. 
Postgres 10) -* 10-assert: check that the PGUpgrade instance has the expected reason: "PGClusterNotShutdown" -* 11-shutdown-cluster: set the spec.shutdown value to 'true' as required for upgrade -* 11-assert: check that the new reason is set, "PGClusterPrimaryNotIdentified" diff --git a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml index 6ff8ed5e67..5356b83be9 100644 --- a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml +++ b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml @@ -7,12 +7,6 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } monitoring: pgmonitor: exporter: diff --git a/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml b/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml index 9cc6ec4877..690d5b505d 100644 --- a/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml +++ b/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml @@ -7,12 +7,6 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } monitoring: pgmonitor: exporter: {} diff --git a/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml b/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml index e3fbb7b94a..d16c898ac2 100644 --- a/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml +++ b/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml @@ -7,12 +7,6 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } monitoring: pgmonitor: exporter: {} diff --git a/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml index d445062bf3..4fa420664a 100644 --- a/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml +++ b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml @@ -7,12 +7,6 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } monitoring: pgmonitor: exporter: diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml similarity index 93% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml index fa3985231d..741efead41 100644 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml +++ 
b/testing/kuttl/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml @@ -6,6 +6,6 @@ metadata: name: empty-image-upgrade spec: # postgres version that is no longer available - fromPostgresVersion: 10 + fromPostgresVersion: 11 toPostgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} postgresClusterName: major-upgrade-empty-image diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/01-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/01-assert.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/01-assert.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/01-assert.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/10--cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml similarity index 97% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/10--cluster.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml index c85a9b8dae..f5ef8c029e 100644 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/10--cluster.yaml +++ b/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml @@ -7,7 +7,7 @@ metadata: name: major-upgrade-empty-image spec: # postgres version that is no longer available - postgresVersion: 10 + postgresVersion: 11 patroni: dynamicConfiguration: postgresql: diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/10-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/10-assert.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/10-assert.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/10-assert.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/11-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/11-assert.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/11-assert.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/11-assert.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/12-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/12-assert.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/12-assert.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/12-assert.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/13-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/13-assert.yaml similarity index 
100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/13-assert.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/13-assert.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/14-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/14-assert.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/14-assert.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/14-assert.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/15--start-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/15--start-cluster.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/15-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/15-assert.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/15-assert.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/15-assert.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/16-check-pgbackrest.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/16-check-pgbackrest.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/16-check-pgbackrest.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/16-check-pgbackrest.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/17--check-version.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/17--check-version.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/17--check-version.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/17--check-version.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/17-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/17-assert.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/17-assert.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/17-assert.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/README.md b/testing/kuttl/e2e/major-upgrade-missing-image/README.md similarity index 99% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/README.md rename to testing/kuttl/e2e/major-upgrade-missing-image/README.md index 341cc854f7..1053da29ed 100644 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/README.md +++ b/testing/kuttl/e2e/major-upgrade-missing-image/README.md @@ -11,7 +11,7 @@ PostgresCluster spec or via the RELATED_IMAGES environment variables. ### Verify new statuses for missing required container images -* 10--cluster: create the cluster with an unavailable image (i.e. Postgres 10) +* 10--cluster: create the cluster with an unavailable image (i.e. 
Postgres 11) * 10-assert: check that the PGUpgrade instance has the expected reason: "PGClusterNotShutdown" * 11-shutdown-cluster: set the spec.shutdown value to 'true' as required for upgrade * 11-assert: check that the new reason is set, "PGClusterPrimaryNotIdentified" diff --git a/testing/kuttl/e2e/optional-backups/00--cluster.yaml b/testing/kuttl/e2e/optional-backups/00--cluster.yaml new file mode 100644 index 0000000000..7b927831e0 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/00--cluster.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + diff --git a/testing/kuttl/e2e/optional-backups/00-assert.yaml b/testing/kuttl/e2e/optional-backups/00-assert.yaml new file mode 100644 index 0000000000..86392d0308 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/00-assert.yaml @@ -0,0 +1,38 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +status: + instances: + - name: instance1 + pgbackrest: {} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: pgdata +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: master +status: + containerStatuses: + - ready: true + - ready: true diff --git a/testing/kuttl/e2e/optional-backups/01-errors.yaml b/testing/kuttl/e2e/optional-backups/01-errors.yaml new file mode 100644 index 0000000000..e702fcddb4 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/01-errors.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: created-without-backups-repo1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: created-without-backups-repo-host +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: created-without-backups-pgbackrest-config +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: created-without-backups-pgbackrest diff --git a/testing/kuttl/e2e/optional-backups/02-assert.yaml b/testing/kuttl/e2e/optional-backups/02-assert.yaml new file mode 100644 index 0000000000..eb3f70357f --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/02-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- 
psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + BEGIN + ASSERT current_setting('archive_command') LIKE 'true', + format('expected "true", got %L', current_setting('archive_command')); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/03-assert.yaml b/testing/kuttl/e2e/optional-backups/03-assert.yaml new file mode 100644 index 0000000000..17ca1e4062 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/03-assert.yaml @@ -0,0 +1,14 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 \ + -c "CREATE TABLE important (data) AS VALUES ('treasure');" + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 \ + -c "CHECKPOINT;" diff --git a/testing/kuttl/e2e/optional-backups/04--cluster.yaml b/testing/kuttl/e2e/optional-backups/04--cluster.yaml new file mode 100644 index 0000000000..fc39ff6ebe --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/04--cluster.yaml @@ -0,0 +1,16 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + diff --git a/testing/kuttl/e2e/optional-backups/05-assert.yaml b/testing/kuttl/e2e/optional-backups/05-assert.yaml new file mode 100644 index 0000000000..d346e01a04 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/05-assert.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: replica +status: + containerStatuses: + - ready: true + - ready: true diff --git a/testing/kuttl/e2e/optional-backups/06-assert.yaml b/testing/kuttl/e2e/optional-backups/06-assert.yaml new file mode 100644 index 0000000000..c366545508 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/06-assert.yaml @@ -0,0 +1,18 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups \ + -l postgres-operator.crunchydata.com/role=replica) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + DECLARE + everything jsonb; + BEGIN + SELECT jsonb_agg(important) INTO everything FROM important; + ASSERT everything = '[{"data":"treasure"}]', format('got %L', everything); + END $$ + SQL diff --git a/installers/olm/config/examples/postgrescluster.example.yaml b/testing/kuttl/e2e/optional-backups/10--cluster.yaml similarity index 63% rename from installers/olm/config/examples/postgrescluster.example.yaml rename to testing/kuttl/e2e/optional-backups/10--cluster.yaml index 502eaff437..6da85c93f9 100644 --- a/installers/olm/config/examples/postgrescluster.example.yaml +++ b/testing/kuttl/e2e/optional-backups/10--cluster.yaml @@ -1,13 +1,15 @@ apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: - name: example + name: created-without-backups 
spec: - postgresVersion: 15 + postgresVersion: ${KUTTL_PG_VERSION} instances: - - replicas: 1 + - name: instance1 + replicas: 1 dataVolumeClaimSpec: - accessModes: [ReadWriteOnce] + accessModes: + - "ReadWriteOnce" resources: requests: storage: 1Gi @@ -17,7 +19,9 @@ spec: - name: repo1 volume: volumeClaimSpec: - accessModes: ["ReadWriteOnce"] + accessModes: + - "ReadWriteOnce" resources: requests: storage: 1Gi + diff --git a/testing/kuttl/e2e/optional-backups/10-assert.yaml b/testing/kuttl/e2e/optional-backups/10-assert.yaml new file mode 100644 index 0000000000..7b740b310d --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/10-assert.yaml @@ -0,0 +1,79 @@ +# It should be possible to turn backups back on. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: pgdata +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: created-without-backups-repo1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: created-without-backups-repo-host +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: created-without-backups-pgbackrest-config +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/patroni: created-without-backups-ha + postgres-operator.crunchydata.com/role: master +status: + containerStatuses: + - ready: true + - ready: true + - ready: true + - ready: true diff --git a/testing/kuttl/e2e/optional-backups/11-assert.yaml b/testing/kuttl/e2e/optional-backups/11-assert.yaml new file mode 100644 index 0000000000..5976d03f41 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/11-assert.yaml @@ -0,0 +1,18 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups \ + -l postgres-operator.crunchydata.com/instance-set=instance1 \ + -l postgres-operator.crunchydata.com/patroni=created-without-backups-ha \ + -l postgres-operator.crunchydata.com/role=master) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + BEGIN + ASSERT current_setting('archive_command') LIKE 'pgbackrest --stanza=db archive-push "%p"', + format('expected 
"pgbackrest --stanza=db archive-push \"%p\"", got %L', current_setting('archive_command')); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/20--cluster.yaml b/testing/kuttl/e2e/optional-backups/20--cluster.yaml new file mode 100644 index 0000000000..8e0d01cbf8 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/20--cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- command: |- + kubectl patch postgrescluster created-without-backups --type 'merge' -p '{"spec":{"backups": null}}' + namespaced: true diff --git a/testing/kuttl/e2e/optional-backups/20-assert.yaml b/testing/kuttl/e2e/optional-backups/20-assert.yaml new file mode 100644 index 0000000000..b469e277f8 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/20-assert.yaml @@ -0,0 +1,63 @@ +# Without the authorizeBackupRemoval annotation, the pgBackRest objects should remain. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: pgdata +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: created-without-backups-repo1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: created-without-backups-repo-host +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: created-without-backups-pgbackrest-config +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: created-without-backups-pgbackrest diff --git a/testing/kuttl/e2e/optional-backups/21-assert.yaml b/testing/kuttl/e2e/optional-backups/21-assert.yaml new file mode 100644 index 0000000000..5976d03f41 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/21-assert.yaml @@ -0,0 +1,18 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups \ + -l postgres-operator.crunchydata.com/instance-set=instance1 \ + -l postgres-operator.crunchydata.com/patroni=created-without-backups-ha \ + -l postgres-operator.crunchydata.com/role=master) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + BEGIN + ASSERT current_setting('archive_command') LIKE 'pgbackrest --stanza=db archive-push "%p"', + format('expected "pgbackrest --stanza=db archive-push \"%p\"", got %L', current_setting('archive_command')); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/22--cluster.yaml b/testing/kuttl/e2e/optional-backups/22--cluster.yaml new file mode 100644 index 0000000000..2e25309886 --- /dev/null +++ 
b/testing/kuttl/e2e/optional-backups/22--cluster.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- command: kubectl annotate postgrescluster created-without-backups postgres-operator.crunchydata.com/authorizeBackupRemoval="true" + namespaced: true diff --git a/testing/kuttl/e2e/optional-backups/23-assert.yaml b/testing/kuttl/e2e/optional-backups/23-assert.yaml new file mode 100644 index 0000000000..8748ea015c --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/23-assert.yaml @@ -0,0 +1,26 @@ +# Once backup removal is authorized, the pgBackRest status should be cleared. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +status: + instances: + - name: instance1 + pgbackrest: {} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: pgdata +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 diff --git a/testing/kuttl/e2e/optional-backups/24-errors.yaml b/testing/kuttl/e2e/optional-backups/24-errors.yaml new file mode 100644 index 0000000000..e702fcddb4 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/24-errors.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: created-without-backups-repo1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: created-without-backups-repo-host +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: created-without-backups-pgbackrest-config +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: created-without-backups-pgbackrest diff --git a/testing/kuttl/e2e/optional-backups/25-assert.yaml b/testing/kuttl/e2e/optional-backups/25-assert.yaml new file mode 100644 index 0000000000..eb3f70357f --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/25-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + BEGIN + ASSERT current_setting('archive_command') LIKE 'true', + format('expected "true", got %L', current_setting('archive_command')); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/README.md b/testing/kuttl/e2e/optional-backups/README.md new file mode 100644 index 0000000000..92c52d4136 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/README.md @@ -0,0 +1,13 @@ +## Optional backups + +### Steps + +00-02. Create cluster without backups, check that expected K8s objects do/don't exist, e.g., repo-host sts doesn't exist; check that the archive command is `true` + +03-06. Add data and a replica; check that the data successfully replicates to the replica. + +10-11. 
Update cluster to add backups, check that expected K8s objects do/don't exist, e.g., repo-host sts exists; check that the archive command is set to the usual pgBackRest archive-push command + +20-21. Update cluster to remove backups but without annotation, check that no changes were made, including to the archive command + +22-25. Annotate cluster to remove existing backups, check that expected K8s objects do/don't exist, e.g., repo-host sts doesn't exist; check that the archive command is `true` diff --git a/testing/kuttl/e2e/password-change/00--cluster.yaml b/testing/kuttl/e2e/password-change/00--cluster.yaml index 2777286880..d7b7019b62 100644 --- a/testing/kuttl/e2e/password-change/00--cluster.yaml +++ b/testing/kuttl/e2e/password-change/00--cluster.yaml @@ -12,14 +12,3 @@ spec: resources: requests: storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e/pgadmin/01--cluster.yaml b/testing/kuttl/e2e/pgadmin/01--cluster.yaml index 2cc932c463..d1afb7be04 100644 --- a/testing/kuttl/e2e/pgadmin/01--cluster.yaml +++ b/testing/kuttl/e2e/pgadmin/01--cluster.yaml @@ -25,12 +25,6 @@ spec: - name: instance1 replicas: 1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } userInterface: pgAdmin: dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml new file mode 100644 index 0000000000..9665fac665 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml @@ -0,0 +1,28 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgbackrest-backup-standby +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + global: + backup-standby: "y" + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml new file mode 100644 index 0000000000..d69a3c68b5 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml @@ -0,0 +1,23 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgbackrest-backup-standby +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: + - bound: true + name: repo1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: pgbackrest-backup-standby + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +status: + phase: Failed diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/01--check-backup-logs.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/01--check-backup-logs.yaml new file mode 100644 index 0000000000..72d2050d4a --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/01--check-backup-logs.yaml @@ -0,0 +1,20 @@ +apiVersion: kuttl.dev/v1beta1 
+kind: TestStep +commands: +# First, find at least one backup job pod. +# Then, check the logs for the 'unable to find standby cluster' line. +# If this line isn't found, exit 1. +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=pgbackrest-backup-standby \ + -l postgres-operator.crunchydata.com/pgbackrest-backup=replica-create) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + logs=$(kubectl logs "${pod}" --namespace "${NAMESPACE}") + { contains "${logs}" 'unable to find standby cluster - cannot proceed'; } || { + echo 'did not find expected standby cluster error ' + exit 1 + } diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml new file mode 100644 index 0000000000..c986f4a9de --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml @@ -0,0 +1,28 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgbackrest-backup-standby +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + global: + backup-standby: "y" + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml new file mode 100644 index 0000000000..92f7b12f5a --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgbackrest-backup-standby +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: pgbackrest-backup-standby + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/README.md b/testing/kuttl/e2e/pgbackrest-backup-standby/README.md new file mode 100644 index 0000000000..39fb8707a8 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/README.md @@ -0,0 +1,5 @@ +### pgBackRest backup-standby test + +* 00: Create a cluster with 'backup-standby' set to 'y' but with only one replica. +* 01: Check the backup Job Pod logs for the expected error. +* 02: Update the cluster to have 2 replicas and verify that the cluster can initialize successfully and the backup job can complete. 
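The README above summarizes the flow; the step-01 log check can also be reproduced by hand against a live cluster. A minimal shell sketch, assuming the cluster from 00--cluster.yaml exists and that NAMESPACE names its namespace (the variable's value below is an assumption; the labels and error string come from the test files above):

    # Locate the replica-create backup pod for the cluster.
    NAMESPACE=default  # assumption: the namespace KUTTL created for the test
    pod=$(kubectl get pods -o name -n "${NAMESPACE}" \
      -l postgres-operator.crunchydata.com/cluster=pgbackrest-backup-standby \
      -l postgres-operator.crunchydata.com/pgbackrest-backup=replica-create)

    # With backup-standby=y and no standby to back up from, pgBackRest aborts,
    # so this exact error line should appear in the backup job's logs.
    kubectl logs "${pod}" --namespace "${NAMESPACE}" |
      grep -F 'unable to find standby cluster - cannot proceed'

Once step 02 raises replicas to 2, the same Job is expected to finish with succeeded: 1 instead.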
diff --git a/testing/kuttl/e2e/pgbackrest-init/06--check-spool-path.yaml b/testing/kuttl/e2e/pgbackrest-init/06--check-spool-path.yaml new file mode 100644 index 0000000000..e32cc2fc87 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/06--check-spool-path.yaml @@ -0,0 +1,17 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/role=master' + ) + + LIST=$( + kubectl exec --namespace "${NAMESPACE}" -c database "${PRIMARY}" -- \ + ls -l /pgdata + ) + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + contains "$LIST" "pgbackrest-spool" || exit 1 diff --git a/testing/kuttl/e2e/pgbackrest-restore/10--wait-archived.yaml b/testing/kuttl/e2e/pgbackrest-restore/10--wait-archived.yaml index 60c5cce932..446886ead3 100644 --- a/testing/kuttl/e2e/pgbackrest-restore/10--wait-archived.yaml +++ b/testing/kuttl/e2e/pgbackrest-restore/10--wait-archived.yaml @@ -14,5 +14,5 @@ commands: # "pg_stat_archiver" counters, so anything more than zero should suffice. kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -c 'SELECT pg_switch_wal()' while [ 0 = "$( - kubectl exec "${NAMESPACE}" "${PRIMARY}" -- psql -qAt -c 'SELECT archived_count FROM pg_stat_archiver' + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -qAt -c 'SELECT archived_count FROM pg_stat_archiver' )" ]; do sleep 1; done diff --git a/testing/kuttl/e2e/pgbackrest-restore/14--lose-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/14--lose-data.yaml index 10483bb9c6..4f1eaeaa53 100644 --- a/testing/kuttl/e2e/pgbackrest-restore/14--lose-data.yaml +++ b/testing/kuttl/e2e/pgbackrest-restore/14--lose-data.yaml @@ -26,7 +26,7 @@ commands: --command 'SELECT pg_switch_wal()' while [ 0 = "$( - kubectl exec "${NAMESPACE}" "${PRIMARY}" -- psql -qAt -c 'SELECT archived_count FROM pg_stat_archiver' + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -qAt -c 'SELECT archived_count FROM pg_stat_archiver' )" ]; do sleep 1; done # The replica should also need to be restored. 
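Both pgbackrest-restore corrections above add a missing --namespace flag to the same wait-for-WAL idiom; without it, the kubectl exec call failed, the command substitution came back empty, and the while condition was immediately false, so the loop never actually waited. Extracted on its own, the corrected pattern from 10--wait-archived.yaml looks like this (a sketch, assuming NAMESPACE is already set):

    # Find the current primary pod.
    PRIMARY=$(
      kubectl get pod --namespace "${NAMESPACE}" \
        --output name --selector 'postgres-operator.crunchydata.com/role=master'
    )

    # Force a WAL segment switch, then poll pg_stat_archiver until at least one
    # segment has been archived; archived_count only increments after the
    # archive_command succeeds, so any value above zero is enough.
    kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- psql -c 'SELECT pg_switch_wal()'
    while [ 0 = "$(
      kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- \
        psql -qAt -c 'SELECT archived_count FROM pg_stat_archiver'
    )" ]; do sleep 1; done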
diff --git a/testing/kuttl/e2e/pgbouncer/00--cluster.yaml b/testing/kuttl/e2e/pgbouncer/00--cluster.yaml index c83bfea9d3..4699d90171 100644 --- a/testing/kuttl/e2e/pgbouncer/00--cluster.yaml +++ b/testing/kuttl/e2e/pgbouncer/00--cluster.yaml @@ -7,14 +7,8 @@ spec: postgresVersion: ${KUTTL_PG_VERSION} instances: - name: instance1 - replicas: 2 + replicas: 1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } proxy: pgBouncer: replicas: 1 diff --git a/testing/kuttl/e2e/pgbouncer/00-assert.yaml b/testing/kuttl/e2e/pgbouncer/00-assert.yaml index afe492faa0..6c3a33079f 100644 --- a/testing/kuttl/e2e/pgbouncer/00-assert.yaml +++ b/testing/kuttl/e2e/pgbouncer/00-assert.yaml @@ -5,9 +5,9 @@ metadata: status: instances: - name: instance1 - readyReplicas: 2 - replicas: 2 - updatedReplicas: 2 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 --- apiVersion: v1 kind: Service diff --git a/testing/kuttl/e2e/replica-read/00--cluster.yaml b/testing/kuttl/e2e/replica-read/00--cluster.yaml index a79666f4e1..c62f5418cd 100644 --- a/testing/kuttl/e2e/replica-read/00--cluster.yaml +++ b/testing/kuttl/e2e/replica-read/00--cluster.yaml @@ -13,14 +13,3 @@ spec: requests: storage: 1Gi replicas: 2 - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml b/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml index 461ae7ccba..2d23e1e3d3 100644 --- a/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml +++ b/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml @@ -9,12 +9,6 @@ spec: - name: instance1 replicas: 1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } --- apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster @@ -27,9 +21,3 @@ spec: - name: instance1 replicas: 1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/00--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/00--create-cluster.yaml new file mode 100644 index 0000000000..c86a544166 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/00--create-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-cluster.yaml +assert: +- files/00-cluster-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/01--user-schema.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/01--user-schema.yaml new file mode 100644 index 0000000000..bbddba56c2 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/01--user-schema.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +# ensure the user schema is created for pgAdmin to use + - script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=elephant, + 
postgres-operator.crunchydata.com/role=master' ) kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ -- psql -qAt -d elephant --command 'CREATE SCHEMA elephant AUTHORIZATION elephant' diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/02--create-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/02--create-pgadmin.yaml new file mode 100644 index 0000000000..0ef15853af --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/02--create-pgadmin.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/02-pgadmin.yaml +assert: +- files/02-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/03-assert.yaml new file mode 100644 index 0000000000..6a25871f63 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/03-assert.yaml @@ -0,0 +1,21 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=elephant, + postgres-operator.crunchydata.com/role=master' + ) + + NUM_USERS=$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- \ + psql -qAt -d elephant --command 'select count(*) from elephant.user' \ + ) + + if [[ ${NUM_USERS} != 1 ]]; then + echo >&2 'Expected 1 user' + echo "got ${NUM_USERS}" + exit 1 + fi diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/04--update-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/04--update-pgadmin.yaml new file mode 100644 index 0000000000..f8aaf480fd --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/04--update-pgadmin.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/04-pgadmin.yaml +assert: +- files/04-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/05-assert.yaml new file mode 100644 index 0000000000..4d31c5db18 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/05-assert.yaml @@ -0,0 +1,36 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +# timeout: 120 +commands: +- script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/cluster=elephant, + postgres-operator.crunchydata.com/role=master' + ) + + NUM_USERS=$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- \ + psql -qAt -d elephant --command 'select count(*) from elephant.user' \ + ) + + if [[ ${NUM_USERS} != 2 ]]; then + echo >&2 'Expected 2 users' + echo "got ${NUM_USERS}" + exit 1 + fi + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + USER_LIST=$( + kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- \ + psql -qAt -d elephant --command 'select email from elephant.user;' \ + ) + + { + contains "${USER_LIST}" "john.doe@example.com" + } || { + echo >&2 'User john.doe@example.com not found. Got:' + echo "${USER_LIST}" + exit 1 + } diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/README.md b/testing/kuttl/e2e/standalone-pgadmin-db-uri/README.md new file mode 100644 index 0000000000..2d7688ae3b --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/README.md @@ -0,0 +1,26 @@ +# pgAdmin external database tests + +Notes: +- Due to the (random) namespace being part of the host, we cannot check the configmap using the usual assert/file pattern. 
+- These tests will only work with pgAdmin version v8 and higher + +## create postgrescluster and add user schema +* 00: + * create a postgrescluster with a label; + * check that the cluster has the label and that the expected user secret is created. +* 01: + * create the user schema for pgAdmin to use + + ## create pgadmin and verify connection to database +* 02: + * create a pgadmin with a selector for the existing cluster's label; + * check the correct existence of the secret, configmap, and pod. +* 03: + * check that pgAdmin only has one user + + ## add a pgadmin user and verify it in the database +* 04: + * update pgadmin with a new user; + * check that the pod is still running as expected. +* 05: + * check that pgAdmin now has two users and that the defined user is present. diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml new file mode 100644 index 0000000000..8ae250152f --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml @@ -0,0 +1,31 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: elephant + labels: + sometest: test1 +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/cluster: elephant + postgres-operator.crunchydata.com/pguser: elephant + postgres-operator.crunchydata.com/role: pguser +type: Opaque +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: elephant + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: master +status: + phase: Running diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml new file mode 100644 index 0000000000..5f8678e5e9 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml @@ -0,0 +1,11 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: elephant + labels: + sometest: test1 +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml new file mode 100644 index 0000000000..6457b2ca20 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin1 +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin1 +type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin.yaml new file mode 100644 index 
0000000000..f1e251b949 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin.yaml @@ -0,0 +1,20 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin1 +spec: + config: + configDatabaseURI: + name: elephant-pguser-elephant + key: uri + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: + - name: kuttl-test + postgresClusterSelector: + matchLabels: + sometest: test1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml new file mode 100644 index 0000000000..3a3f459441 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin1 +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin.yaml new file mode 100644 index 0000000000..2c62b58b4b --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin.yaml @@ -0,0 +1,33 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin1 +spec: + users: + - username: "john.doe@example.com" + passwordRef: + name: john-doe-password + key: password + config: + configDatabaseURI: + name: elephant-pguser-elephant + key: uri + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: + - name: kuttl-test + postgresClusterSelector: + matchLabels: + sometest: test1 +--- +apiVersion: v1 +kind: Secret +metadata: + name: john-doe-password +type: Opaque +stringData: + password: password diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/00--pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/00--pgadmin.yaml new file mode 100644 index 0000000000..9372467a93 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/00--pgadmin.yaml @@ -0,0 +1,12 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serviceName: pgadmin-service diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/00-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/00-assert.yaml new file mode 100644 index 0000000000..758814cad2 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/00-assert.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: pgadmin-service + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + controller: true + kind: PGAdmin + name: pgadmin +spec: + selector: + postgres-operator.crunchydata.com/pgadmin: pgadmin + ports: + - port: 5050 + targetPort: 5050 + protocol: TCP + name: pgadmin-port + type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/01--update-service.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/01--update-service.yaml new file mode 100644 index 0000000000..81db248fd4 --- /dev/null +++ 
b/testing/kuttl/e2e/standalone-pgadmin-service/01--update-service.yaml @@ -0,0 +1,12 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serviceName: pgadmin-service-updated diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/01-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/01-assert.yaml new file mode 100644 index 0000000000..2303ebe9bb --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/01-assert.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: pgadmin-service-updated + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +spec: + selector: + postgres-operator.crunchydata.com/pgadmin: pgadmin + ports: + - port: 5050 + targetPort: 5050 + protocol: TCP + name: pgadmin-port + type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/02--remove-service.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/02--remove-service.yaml new file mode 100644 index 0000000000..b8cbf4eb41 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/02--remove-service.yaml @@ -0,0 +1,11 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/02-errors.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/02-errors.yaml new file mode 100644 index 0000000000..f2795c106d --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/02-errors.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: pgadmin-service + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +spec: + selector: + postgres-operator.crunchydata.com/pgadmin: pgadmin + ports: + - port: 5050 + targetPort: 5050 + protocol: TCP + name: pgadmin-port + type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/10--manual-service.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/10--manual-service.yaml new file mode 100644 index 0000000000..88d8da6718 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/10--manual-service.yaml @@ -0,0 +1,29 @@ +# Manually create a service that should be taken over by pgAdmin +# The manual service is of type LoadBalancer +# Once taken over, the type should change to ClusterIP +apiVersion: v1 +kind: Service +metadata: + name: manual-pgadmin-service +spec: + ports: + - name: pgadmin-port + port: 5050 + protocol: TCP + selector: + postgres-operator.crunchydata.com/pgadmin: rhino + type: LoadBalancer +--- +# Create a pgAdmin that points to an existing un-owned service +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: manual-svc-pgadmin +spec: + serviceName: manual-pgadmin-service + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/10-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/10-assert.yaml new file mode 100644 index 0000000000..95bf241b16 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/10-assert.yaml @@ -0,0 +1,22 @@ +# Check that the manually created service has the correct ownerReference +apiVersion: v1 +kind: Service +metadata: + name: 
manual-pgadmin-service + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: manual-svc-pgadmin + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + controller: true + kind: PGAdmin + name: manual-svc-pgadmin +spec: + selector: + postgres-operator.crunchydata.com/pgadmin: manual-svc-pgadmin + ports: + - port: 5050 + targetPort: 5050 + protocol: TCP + name: pgadmin-port + type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/20--owned-service.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/20--owned-service.yaml new file mode 100644 index 0000000000..04f211ffc7 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/20--owned-service.yaml @@ -0,0 +1,13 @@ +# Create a pgAdmin that will create and own a service +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin-service-owner +spec: + serviceName: pgadmin-owned-service + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/20-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/20-assert.yaml new file mode 100644 index 0000000000..a6ab1653bb --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/20-assert.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: pgadmin-owned-service + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin-service-owner + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + controller: true + kind: PGAdmin + name: pgadmin-service-owner +spec: + selector: + postgres-operator.crunchydata.com/pgadmin: pgadmin-service-owner + ports: + - port: 5050 + targetPort: 5050 + protocol: TCP + name: pgadmin-port + type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/21--service-takeover-fails.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/21--service-takeover-fails.yaml new file mode 100644 index 0000000000..f992521ce8 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/21--service-takeover-fails.yaml @@ -0,0 +1,13 @@ +# Create a second pgAdmin that attempts to steal the service +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin-service-thief +spec: + serviceName: pgadmin-owned-service + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/21-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/21-assert.yaml new file mode 100644 index 0000000000..060d669987 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-service/21-assert.yaml @@ -0,0 +1,35 @@ +# Original service should still have owner reference +apiVersion: v1 +kind: Service +metadata: + name: pgadmin-owned-service + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin-service-owner + ownerReferences: + - apiVersion: postgres-operator.crunchydata.com/v1beta1 + controller: true + kind: PGAdmin + name: pgadmin-service-owner +spec: + selector: + postgres-operator.crunchydata.com/pgadmin: pgadmin-service-owner + ports: + - port: 5050 + targetPort: 5050 + protocol: TCP + name: pgadmin-port + type: ClusterIP +--- +# An event should be created for the failure to reconcile the Service +apiVersion: v1 +involvedObject: + apiVersion: 
postgres-operator.crunchydata.com/v1beta1 + kind: PGAdmin + name: pgadmin-service-thief +kind: Event +message: 'Failed to reconcile Service ServiceName: pgadmin-owned-service' +reason: InvalidServiceWarning +source: + component: pgadmin-controller +type: Warning diff --git a/testing/kuttl/e2e-other/standalone-pgadmin/00--create-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/00--create-pgadmin.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin/00--create-pgadmin.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/00--create-pgadmin.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml new file mode 100644 index 0000000000..244533b7ee --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml @@ -0,0 +1,26 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# When setup.py returns users in JSON, the Role translation is 1 for Admin, 2 for User +- script: | + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") + + bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + + [ $bob_role = 1 ] && [ $dave_role = 2 ] || exit 1 + + users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) + + bob_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="bob@example.com") | .isAdmin') + dave_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="dave@example.com") | .isAdmin') + + $bob_is_admin && !
$dave_is_admin || exit 1 + + bob_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="bob@example.com") | .password') + dave_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="dave@example.com") | .password') + + [ "$bob_password" = "password123" ] && [ "$dave_password" = "password456" ] || exit 1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml new file mode 100644 index 0000000000..0ef15853af --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/02-pgadmin.yaml +assert: +- files/02-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml new file mode 100644 index 0000000000..01aff25b3b --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml @@ -0,0 +1,29 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# When setup.py returns users in JSON, the Role translation is 1 for Admin, 2 for User +- script: | + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") + + bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + + [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + + users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) + + bob_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="bob@example.com") | .isAdmin') + dave_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="dave@example.com") | .isAdmin') + jimi_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="jimi@example.com") | .isAdmin') + + $bob_is_admin && $dave_is_admin && !
$jimi_is_admin || exit 1 + + bob_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="bob@example.com") | .password') + dave_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="dave@example.com") | .password') + jimi_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="jimi@example.com") | .password') + + [ "$bob_password" = "password123" ] && [ "$dave_password" = "password456" ] && [ "$jimi_password" = "password789" ] || exit 1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml new file mode 100644 index 0000000000..f8aaf480fd --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/04-pgadmin.yaml +assert: +- files/04-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml new file mode 100644 index 0000000000..1dca13a7b7 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml @@ -0,0 +1,29 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# When setup.py returns users in JSON, the Role translation is 1 for Admin, 2 for User +- script: | + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") + + bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + + [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + + users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) + + bob_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="bob@example.com") | .isAdmin') + dave_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="dave@example.com") | .isAdmin') + jimi_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="jimi@example.com") | .isAdmin') + + $bob_is_admin && $dave_is_admin && !
$jimi_is_admin || exit 1 + + bob_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="bob@example.com") | .password') + dave_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="dave@example.com") | .password') + jimi_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="jimi@example.com") | .password') + + [ "$bob_password" = "NEWpassword123" ] && [ "$dave_password" = "NEWpassword456" ] && [ "$jimi_password" = "NEWpassword789" ] || exit 1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml new file mode 100644 index 0000000000..a538b7dca4 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/06-pgadmin.yaml +assert: +- files/06-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml new file mode 100644 index 0000000000..5c0e7267e6 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml @@ -0,0 +1,19 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# When setup.py returns users in JSON, the Role translation is 1 for Admin, 2 for User
- script: | + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") + + bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + + [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + + users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) + + $(printf '%s\n' $users_in_secret | jq '.
== []') || exit 1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/README.md b/testing/kuttl/e2e/standalone-pgadmin-user-management/README.md new file mode 100644 index 0000000000..0bbdfc2893 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/README.md @@ -0,0 +1,21 @@ +# pgAdmin User Management tests + +*Note: These tests only work with pgAdmin v8 and higher* + +## Create pgAdmin with users + +* Start pgAdmin with a couple of users +* Ensure users exist in pgAdmin with correct settings +* Ensure users exist in the `users.json` file in the pgAdmin secret with the correct settings + +## Edit pgAdmin users + +* Add a user and edit an existing user +* Ensure users exist in pgAdmin with correct settings +* Ensure users exist in the `users.json` file in the pgAdmin secret with the correct settings + +## Delete pgAdmin users + +* Remove users from pgAdmin spec +* Ensure users still exist in pgAdmin with correct settings +* Ensure users have been removed from the `users.json` file in the pgAdmin secret diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml new file mode 100644 index 0000000000..f2c7f28cd1 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: bob-password-secret +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: dave-password-secret +type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin.yaml new file mode 100644 index 0000000000..ce86d8d894 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin.yaml @@ -0,0 +1,40 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: [] + users: + - username: bob@example.com + role: Administrator + passwordRef: + name: bob-password-secret + key: password + - username: dave@example.com + passwordRef: + name: dave-password-secret + key: password +--- +apiVersion: v1 +kind: Secret +metadata: + name: bob-password-secret +type: Opaque +data: + # Password is "password123", base64 encoded + password: cGFzc3dvcmQxMjM= +--- +apiVersion: v1 +kind: Secret +metadata: + name: dave-password-secret +type: Opaque +data: + # Password is "password456", base64 encoded + password: cGFzc3dvcmQ0NTY= diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml new file mode 100644 index 0000000000..9a07b0d994 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + labels:
+ postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: bob-password-secret +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: dave-password-secret +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: jimi-password-secret +type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin.yaml new file mode 100644 index 0000000000..88f75d8092 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin.yaml @@ -0,0 +1,54 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: [] + users: + - username: bob@example.com + role: Administrator + passwordRef: + name: bob-password-secret + key: password + - username: dave@example.com + role: Administrator + passwordRef: + name: dave-password-secret + key: password + - username: jimi@example.com + passwordRef: + name: jimi-password-secret + key: password +--- +apiVersion: v1 +kind: Secret +metadata: + name: bob-password-secret +type: Opaque +data: + # Password is "password123", base64 encoded + password: cGFzc3dvcmQxMjM= +--- +apiVersion: v1 +kind: Secret +metadata: + name: dave-password-secret +type: Opaque +data: + # Password is "password456", base64 encoded + password: cGFzc3dvcmQ0NTY= +--- +apiVersion: v1 +kind: Secret +metadata: + name: jimi-password-secret +type: Opaque +data: + # Password is "password789", base64 encoded + password: cGFzc3dvcmQ3ODk= diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml new file mode 100644 index 0000000000..9a07b0d994 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: bob-password-secret +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: dave-password-secret +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: jimi-password-secret +type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml new file mode 100644 index 0000000000..32b0081f92 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml @@ -0,0 +1,54 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin
+metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: [] + users: + - username: bob@example.com + role: Administrator + passwordRef: + name: bob-password-secret + key: password + - username: dave@example.com + role: Administrator + passwordRef: + name: dave-password-secret + key: password + - username: jimi@example.com + passwordRef: + name: jimi-password-secret + key: password +--- +apiVersion: v1 +kind: Secret +metadata: + name: bob-password-secret +type: Opaque +data: + # Password is "NEWpassword123", base64 encoded + password: TkVXcGFzc3dvcmQxMjM= +--- +apiVersion: v1 +kind: Secret +metadata: + name: dave-password-secret +type: Opaque +data: + # Password is "NEWpassword456", base64 encoded + password: TkVXcGFzc3dvcmQ0NTY= +--- +apiVersion: v1 +kind: Secret +metadata: + name: jimi-password-secret +type: Opaque +data: + # Password is "NEWpassword789", base64 encoded + password: TkVXcGFzc3dvcmQ3ODk= diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml new file mode 100644 index 0000000000..04481fb4d1 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/data: pgadmin + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +status: + containerStatuses: + - name: pgadmin + ready: true + started: true + phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin.yaml new file mode 100644 index 0000000000..0513edf050 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin.yaml @@ -0,0 +1,13 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PGAdmin +metadata: + name: pgadmin +spec: + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + serverGroups: [] + users: [] diff --git a/testing/kuttl/e2e/standalone-pgadmin/00--create-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin/00--create-pgadmin.yaml new file mode 100644 index 0000000000..ee1a03ec64 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/00--create-pgadmin.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-pgadmin.yaml +assert: +- files/00-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin/00-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/00-assert.yaml new file mode 100644 index 0000000000..5b95b46964 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/00-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/pgadmin=pgadmin +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/pgadmin=pgadmin diff --git a/testing/kuttl/e2e-other/standalone-pgadmin/01-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml similarity index 83% rename from testing/kuttl/e2e-other/standalone-pgadmin/01-assert.yaml rename to
testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml index 8b75a3e40e..6b7c8c8794 100644 --- a/testing/kuttl/e2e-other/standalone-pgadmin/01-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml @@ -6,7 +6,7 @@ commands: pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") clusters_expected="\"Servers\": {}" { diff --git a/testing/kuttl/e2e-other/standalone-pgadmin/02--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/02--create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin/02--create-cluster.yaml rename to testing/kuttl/e2e/standalone-pgadmin/02--create-cluster.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml similarity index 95% rename from testing/kuttl/e2e-other/standalone-pgadmin/03-assert.yaml rename to testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml index e9709042a8..169a8261eb 100644 --- a/testing/kuttl/e2e-other/standalone-pgadmin/03-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml @@ -45,7 +45,7 @@ commands: exit 1 } - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") clusters_expected=' { @@ -58,6 +58,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin1", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" diff --git a/testing/kuttl/e2e-other/standalone-pgadmin/04--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/04--create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin/04--create-cluster.yaml rename to testing/kuttl/e2e/standalone-pgadmin/04--create-cluster.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml similarity index 95% rename from testing/kuttl/e2e-other/standalone-pgadmin/05-assert.yaml rename to testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml index 561cf13593..7fe5b69dc2 100644 --- a/testing/kuttl/e2e-other/standalone-pgadmin/05-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml @@ -57,7 +57,7 @@ commands: exit 1 } - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user
admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") clusters_expected=' { @@ -70,6 +70,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin1", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" @@ -83,6 +84,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin2", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" diff --git a/testing/kuttl/e2e-other/standalone-pgadmin/06--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/06--create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin/06--create-cluster.yaml rename to testing/kuttl/e2e/standalone-pgadmin/06--create-cluster.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin/07-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml similarity index 95% rename from testing/kuttl/e2e-other/standalone-pgadmin/07-assert.yaml rename to testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml index ad75223edd..323237cad4 100644 --- a/testing/kuttl/e2e-other/standalone-pgadmin/07-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml @@ -67,7 +67,7 @@ commands: exit 1 } - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") clusters_expected=' { @@ -70,6 +70,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin1", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" @@ -93,6 +94,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin2", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" @@ -106,6 +108,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin3", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" diff --git a/testing/kuttl/e2e-other/standalone-pgadmin/08--delete-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/08--delete-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin/08--delete-cluster.yaml rename to testing/kuttl/e2e/standalone-pgadmin/08--delete-cluster.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin/09-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml similarity index 95% rename from testing/kuttl/e2e-other/standalone-pgadmin/09-assert.yaml rename to testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml index be1e124125..eca5581cb7 100644 --- a/testing/kuttl/e2e-other/standalone-pgadmin/09-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml @@ -57,7 +57,7 @@ commands: exit 1 } - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers
/tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") clusters_expected=' { @@ -70,6 +70,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin1", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" @@ -83,6 +84,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin3", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" diff --git a/testing/kuttl/e2e/standalone-pgadmin/10-invalid-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin/10-invalid-pgadmin.yaml new file mode 100644 index 0000000000..118b8d06ef --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/10-invalid-pgadmin.yaml @@ -0,0 +1,37 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +# Check that invalid spec cannot be applied. +commands: +- script: | + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } + + data_expected='"pgadmin2" is invalid: spec.serverGroups[0]: Invalid value: "object": exactly one of "postgresClusterName" or "postgresClusterSelector" is required' + + data_actual=$(kubectl apply -f - 2>&1 < /pgwal/pgbackrest-spool" || exit 1 diff --git a/testing/policies/kyverno/service_links.yaml b/testing/policies/kyverno/service_links.yaml index 4192eaf973..0ae48796ed 100644 --- a/testing/policies/kyverno/service_links.yaml +++ b/testing/policies/kyverno/service_links.yaml @@ -1,4 +1,4 @@ -# Copyright 2022 - 2023 Crunchy Data Solutions, Inc. +# Copyright 2022 - 2024 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/trivy.yaml b/trivy.yaml new file mode 100644 index 0000000000..b2ef32d785 --- /dev/null +++ b/trivy.yaml @@ -0,0 +1,14 @@ +# https://aquasecurity.github.io/trivy/latest/docs/references/configuration/config-file/ +--- +# Specify an exact list of recognized and acceptable licenses. +# [A GitHub workflow](/.github/workflows/trivy.yaml) rejects pull requests that +# import licenses not in this list. +# +# https://aquasecurity.github.io/trivy/latest/docs/scanner/license/ +license: + ignored: + - Apache-2.0 + - BSD-2-Clause + - BSD-3-Clause + - ISC + - MIT
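Editor's note on the service takeover steps (10--manual-service.yaml and 10-assert.yaml above): outside of kuttl, the same adoption check can be run by hand. A minimal sketch under the assumptions of those files; the service and PGAdmin names come from 10--manual-service.yaml, and NAMESPACE is assumed to be set:

#!/usr/bin/env bash
# Sketch: confirm the PGAdmin controller adopted the manually created Service,
# i.e. set itself as controlling owner and reset the type to ClusterIP.
set -eu

owner=$(kubectl get service manual-pgadmin-service -n "${NAMESPACE}" \
  -o jsonpath='{.metadata.ownerReferences[?(@.kind=="PGAdmin")].name}')
svc_type=$(kubectl get service manual-pgadmin-service -n "${NAMESPACE}" \
  -o jsonpath='{.spec.type}')

[ "${owner}" = "manual-svc-pgadmin" ] && [ "${svc_type}" = "ClusterIP" ] || exit 1
echo "service adopted by ${owner}"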
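The user-management asserts repeat one pattern: compare pgAdmin's own user list (setup.py get-users --json) against the users.json key of the operator-managed secret. A standalone sketch of the secret half of that check, assuming the label selector and secret layout shown in the asserts; the usernames and expected values are illustrative:

#!/usr/bin/env bash
# Sketch: decode users.json from the pgAdmin secret and check one user's
# isAdmin flag and password. Mirrors the kuttl TestAssert scripts above.
set -eu

selector="postgres-operator.crunchydata.com/pgadmin=pgadmin"
secret_name=$(kubectl get secret -n "${NAMESPACE}" -l "${selector}" -o name)

# users.json is stored base64-encoded under the "users.json" key of the secret.
users_json=$(kubectl get "${secret_name}" -n "${NAMESPACE}" \
  -o 'go-template={{index .data "users.json" }}' | base64 -d)

check_user() { # check_user <username> <isAdmin: true|false> <password>
  jq -e --arg u "$1" --argjson a "$2" --arg p "$3" \
    '.[] | select(.username == $u) | (.isAdmin == $a and .password == $p)' \
    <<< "${users_json}" > /dev/null
}

check_user bob@example.com true password123
check_user dave@example.com false password456
echo "users.json matches expectations"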
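Finally, the 10-invalid-pgadmin.yaml step works by expecting kubectl apply to be rejected by the CRD's validation rule and then matching the error text. A hedged sketch of that "apply must fail with this message" pattern; the contains and diff_comp helpers are copied from the step itself, while the manifest filename is an illustrative stand-in and the expected message is the rule quoted in the step:

#!/usr/bin/env bash
# Sketch: assert that an invalid manifest is rejected with a specific message.
contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; }
diff_comp() { bash -ceu 'diff <(echo "$1") <(echo "$2")' - "$@"; }

expected='exactly one of "postgresClusterName" or "postgresClusterSelector" is required'

# kubectl apply exits non-zero when the API server rejects the object;
# capture stderr so the rejection message can be matched.
if actual=$(kubectl apply -f invalid-pgadmin.yaml 2>&1); then
  echo "apply unexpectedly succeeded"; exit 1
fi

contains "${actual}" "${expected}" || {
  echo "unexpected rejection message:"
  diff_comp "${actual}" "${expected}"
  exit 1
}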